Mirror of https://github.com/awakesecurity/hocker.git
Refactoring polish and haddocks documentation refactor (#7)
* Refactoring polish and haddocks documentation refactor
* Remove tested with 7.10.2 until we actually test it
* Remove the TODO header as most of that is done now
This commit is contained in:
parent f6f72e617b
commit aab52668e5

README.md (18 lines changed)
@@ -101,21 +101,3 @@ also do not supply a `--registry` flag then the tools assume you wish to make a
 request to the public docker hub registry, in which case they ask for a
 short-lived authentication token from the registry auth server and then make the
 request to the public docker hub registry.
-
-# TODO
-- [X] ~Get a nix-build workflow working for hocker~
-- [ ] Work on a nix-shell based dev workflow
-- [ ] Document types in `Exceptions`, `ErrorHandling`, etc.
-- [x] ~Rename the `Types/Extra.hs` module, that's poorly named~ (I got rid of it)
-- [x] ~Write an updated and accurate README introduction~
-- [X] Rename `ContainerName` and `ContainerTag` to `ImageName` and `ImageTag` to
-  be more consistent with the correct docker terminology
-- [x] ~Remove the run prefix from most of the `V1_2.hs` module functions~ (replaced with a `do` prefix)
-- [X] ~Use HockerException in docker2nix's lib functions~
-- [x] ~Better document the types and function signatures in `Nix/FetchDocker.hs`~
-- [X] L258 fix docker-layer to hocker-layer
-- [ ] Proofread comments
-- [ ] `Data/Docker/Image/Types.hs` can probably move to a more general location
-  I think
-- [ ] Use friendly module prefixing more consistently and cleanup usage
-- [ ] Strip out the unused docker image V1 code
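The token exchange described in the README excerpt above could look roughly like the following Haskell sketch (hypothetical, not part of this commit; the auth endpoint, query parameters, and the "token" response field are assumptions about Docker Hub's public auth service):

{-# LANGUAGE OverloadedStrings #-}
module TokenSketch where

import           Control.Lens    ((^.))
import           Data.Aeson.Lens (key, _String)
import qualified Data.Text       as T
import qualified Network.Wreq    as Wreq

-- Ask the auth server for a short-lived pull token for library/debian; the
-- returned token would then be sent as an "Authorization: Bearer <token>"
-- header on the subsequent registry requests.
fetchPullToken :: IO T.Text
fetchPullToken = do
  resp <- Wreq.get
    "https://auth.docker.io/token?service=registry.docker.io&scope=repository:library/debian:pull"
  pure (resp ^. Wreq.responseBody . key "token" . _String)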
@ -21,16 +21,16 @@
|
||||
|
||||
module Main where
|
||||
|
||||
import Data.ByteString.Lazy.Char8 as C8L
|
||||
import Data.Maybe (fromMaybe)
|
||||
import qualified Data.Text as T
|
||||
import Data.ByteString.Lazy.Char8 as C8L
|
||||
import Data.Maybe (fromMaybe)
|
||||
import Data.Text (Text)
|
||||
import Options.Generic
|
||||
import System.IO (hWaitForInput, stdin)
|
||||
import System.IO (hWaitForInput, stdin)
|
||||
|
||||
import Data.Docker.Image.Types
|
||||
import Data.Docker.Nix.FetchDocker as Nix.FetchDocker
|
||||
import Data.Docker.Nix.FetchDocker as Nix.FetchDocker
|
||||
import Lib
|
||||
import Network.Wreq.Docker.Registry.V2 as Docker.Registry
|
||||
import Network.Wreq.Docker.Registry as Docker.Registry
|
||||
import Types
|
||||
import Types.ImageName
|
||||
import Types.ImageTag
|
||||
@ -45,7 +45,7 @@ data ProgArgs w = ProgArgs
|
||||
<?> "Fetch image manifest from a path on the filesystem"
|
||||
-- | Alternative docker image name made available in the Nix
|
||||
-- expression fetchdocker derivation
|
||||
, altImageName :: w ::: Maybe T.Text
|
||||
, altImageName :: w ::: Maybe Text
|
||||
<?> "Alternate image name provided in the `fetcdocker` derivation"
|
||||
-- | Docker image name (includes the reponame, e.g: library/debian)
|
||||
, name :: ImageName
|
||||
@ -57,12 +57,12 @@ data ProgArgs w = ProgArgs
|
||||
instance ParseRecord (ProgArgs Wrapped)
|
||||
deriving instance Show (ProgArgs Unwrapped)
|
||||
|
||||
progSummary :: T.Text
|
||||
progSummary :: Text
|
||||
progSummary = "Produce a Nix expression given a manifest for a docker image via stdin or via a filepath"
|
||||
|
||||
main :: IO ()
|
||||
main = unwrapRecord progSummary >>= \ProgArgs{..} -> do
|
||||
let (imageRepo, imageName) = Lib.splitImgName name
|
||||
let (imageRepo, imageName) = Lib.splitRepository name
|
||||
dockerRegistry = fromMaybe defaultRegistry registry
|
||||
|
||||
manifestJSON <-
|
||||
|
@ -15,13 +15,13 @@
|
||||
|
||||
module Main where
|
||||
|
||||
import Data.Maybe (fromMaybe)
|
||||
import Data.Maybe (fromMaybe)
|
||||
import qualified Data.Text
|
||||
import Options.Generic
|
||||
|
||||
import Lib
|
||||
import Network.Wreq.Docker.Image.V1_2 as Docker.Image
|
||||
import Network.Wreq.Docker.Registry.V2
|
||||
import Network.Wreq.Docker.Image as Docker.Image
|
||||
import Network.Wreq.Docker.Registry
|
||||
import Types
|
||||
|
||||
progSummary :: Data.Text.Text
|
||||
|
@ -15,15 +15,15 @@
|
||||
|
||||
module Main where
|
||||
|
||||
import Data.Maybe (fromMaybe)
|
||||
import Data.Maybe (fromMaybe)
|
||||
import qualified Data.Text
|
||||
import Data.Text.IO as TIO
|
||||
import Data.Text.IO as TIO
|
||||
import Options.Generic
|
||||
import System.IO.Temp as Tmp
|
||||
import System.IO.Temp as Tmp
|
||||
|
||||
import Lib
|
||||
import Network.Wreq.Docker.Image.V1_2 as Docker.Image
|
||||
import Network.Wreq.Docker.Registry.V2
|
||||
import Network.Wreq.Docker.Image as Docker.Image
|
||||
import Network.Wreq.Docker.Registry
|
||||
import Types
|
||||
|
||||
progSummary :: Data.Text.Text
|
||||
@ -35,7 +35,7 @@ main = unwrapRecord progSummary >>= \Options{..} -> do
|
||||
|
||||
auth <- mkAuth dockerRegistry imageName credentials
|
||||
img <- withSystemTempDirectory "hocker-image-XXXXXX" $ \d ->
|
||||
Docker.Image.fetchAndAssemble $
|
||||
Docker.Image.fetchImage $
|
||||
HockerMeta
|
||||
{ outDir = Just d
|
||||
, imageLayer = Nothing
|
||||
|
@ -21,19 +21,19 @@
|
||||
|
||||
module Main where
|
||||
|
||||
import qualified Crypto.Hash as Hash
|
||||
import Data.Maybe (fromMaybe)
|
||||
import qualified Crypto.Hash as Hash
|
||||
import Data.Maybe (fromMaybe)
|
||||
import qualified Data.Text
|
||||
import Options.Generic
|
||||
|
||||
import Lib
|
||||
import Network.Wreq.Docker.Image.V1_2 as Docker.Image
|
||||
import Network.Wreq.Docker.Registry.V2
|
||||
import Network.Wreq.Docker.Image as Docker.Image
|
||||
import Network.Wreq.Docker.Registry
|
||||
import Types
|
||||
import Types.Hash ()
|
||||
import Types.Hash ()
|
||||
import Types.ImageName
|
||||
import Types.ImageTag
|
||||
import Types.URI ()
|
||||
import Types.URI ()
|
||||
|
||||
data ProgArgs w = ProgArgs
|
||||
{ -- | URI for the registry, optional
|
||||
|
@ -15,13 +15,13 @@
|
||||
|
||||
module Main where
|
||||
|
||||
import Data.Maybe (fromMaybe)
|
||||
import Data.Maybe (fromMaybe)
|
||||
import qualified Data.Text
|
||||
import Options.Generic
|
||||
|
||||
import Lib
|
||||
import Network.Wreq.Docker.Image.V1_2 as Docker.Image
|
||||
import Network.Wreq.Docker.Registry.V2
|
||||
import Network.Wreq.Docker.Image as Docker.Image
|
||||
import Network.Wreq.Docker.Registry
|
||||
import Types
|
||||
|
||||
progSummary :: Data.Text.Text
|
||||
|
hocker.cabal (44 lines changed)
@ -1,17 +1,39 @@
|
||||
name: hocker
|
||||
version: 0.1.0.0
|
||||
synopsis: CLI tools and library to interact with a V2 Docker Registry
|
||||
description: Please see README.md
|
||||
version: 1.0.0
|
||||
synopsis: Utilities for interacting with the docker registry and generating nix build instructions
|
||||
homepage: https://github.com/awakenetworks/hocker#readme
|
||||
Bug-Reports: https://github.com/awakenetworks/hocker/issues
|
||||
license: Apache-2.0
|
||||
license-file: LICENSE
|
||||
author: Awake networks
|
||||
maintainer: opensource@awakenetworks.com
|
||||
copyright: 2016 Awake Networks
|
||||
category: Web
|
||||
category: Utilities
|
||||
build-type: Simple
|
||||
extra-source-files: LICENSE
|
||||
cabal-version: >=1.10
|
||||
Tested-With: GHC == 8.0.1
|
||||
Description:
|
||||
@hocker@ is a suite of command line utilities and a library for:
|
||||
.
|
||||
* fetching a docker image
|
||||
* fetching a layer of an image
|
||||
* fetching an image's configuration
|
||||
* fetching a docker registry image manifest
|
||||
* generating nix build instructions from a registry image manifest
|
||||
.
|
||||
The motivation for this tool came from a need to fetch docker
|
||||
image artifacts from a docker registry without the stock docker
|
||||
tooling that is designed to only work with the docker daemon.
|
||||
.
|
||||
These tools /only/ work with version 2 of the docker registry and
|
||||
docker version (>=) 1.10.
|
||||
.
|
||||
For a complete set of usage examples please see the project's <https://github.com/awakenetworks/hocker#readme README.md>.
|
||||
|
||||
source-repository head
|
||||
type: git
|
||||
location: https://github.com/awakenetworks/hocker
|
||||
|
||||
library
|
||||
ghc-options: -Wall
|
||||
@ -29,12 +51,9 @@ library
|
||||
Data.Docker.Nix.FetchDocker,
|
||||
Data.Docker.Image.Types,
|
||||
Data.Docker.Image.AesonHelpers,
|
||||
Data.Docker.Image.V1.Layer,
|
||||
Data.Docker.Image.V1.Types,
|
||||
Data.Docker.Image.V1_2.Types,
|
||||
Network.Wreq.ErrorHandling,
|
||||
Network.Wreq.Docker.Registry.V2,
|
||||
Network.Wreq.Docker.Image.V1_2,
|
||||
Network.Wreq.Docker.Registry,
|
||||
Network.Wreq.Docker.Image,
|
||||
Network.Wreq.Docker.Image.Lib
|
||||
build-depends:
|
||||
base >= 4.9 && < 5,
|
||||
@ -189,8 +208,7 @@ test-suite hocker-tests
|
||||
hs-source-dirs: test
|
||||
main-is: Main.hs
|
||||
other-modules:
|
||||
Tests.Data.Docker.Image.V1,
|
||||
Tests.Data.Docker.Image.V1_2,
|
||||
Tests.Data.Docker.Image,
|
||||
Tests.Data.Docker.Nix.FetchDocker
|
||||
build-depends:
|
||||
base >= 4.9 && < 5,
|
||||
@ -214,7 +232,3 @@ test-suite hocker-tests
|
||||
|
||||
ghc-options: -threaded -rtsopts -with-rtsopts=-N
|
||||
default-language: Haskell2010
|
||||
|
||||
source-repository head
|
||||
type: git
|
||||
location: https://github.com/awakenetworks/hocker
|
||||
|
@ -1,6 +1,7 @@
|
||||
{-# LANGUAGE LambdaCase #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
{-# LANGUAGE RecordWildCards #-}
|
||||
{-# LANGUAGE TemplateHaskell #-}
|
||||
{-# LANGUAGE TupleSections #-}
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
@ -14,18 +15,22 @@
|
||||
|
||||
module Data.Docker.Image.Types where
|
||||
|
||||
import Data.ByteString.Lazy.Char8 as C8L
|
||||
import qualified Data.Text as T
|
||||
import qualified Crypto.Hash as Hash
|
||||
import Data.Aeson
|
||||
import Data.Aeson.TH
|
||||
import Data.Aeson.Types
|
||||
import qualified Data.ByteArray as BA
|
||||
import qualified Data.ByteArray.Encoding as BA
|
||||
import qualified Data.ByteString.Char8 as C8
|
||||
import Data.ByteString.Lazy.Char8 as C8L
|
||||
import Data.HashMap.Strict as H
|
||||
import Data.Text (Text)
|
||||
|
||||
import Data.Docker.Image.AesonHelpers
|
||||
import Types
|
||||
import Types.ImageTag
|
||||
|
||||
-- | Record of all the metadata we need for a docker image; this
|
||||
-- includes the basics like registry location, image repository name,
|
||||
-- image name, image tag, a possible alternative image name, and
|
||||
-- finally the full manifest JSON for the docker image from which a
|
||||
-- complete image can be constructed (supplying the config JSON and
|
||||
-- references to all of the layers).
|
||||
-- | Metadata needed for constructing a docker image.
|
||||
data HockerImageMeta = HockerImageMeta
|
||||
{ -- | Docker image repo, the first part of a repository+name
|
||||
-- separated by a "/"; e.g: library/debian.
|
||||
@ -35,18 +40,109 @@ data HockerImageMeta = HockerImageMeta
|
||||
, imageName :: ImageNamePart
|
||||
-- | Docker image tag
|
||||
, imageTag :: ImageTag
|
||||
|
||||
-- | A docker image manifest JSON blob as usually fetched from a
|
||||
-- docker registry.
|
||||
--
|
||||
-- TODO: switch this to the JSON AST type?
|
||||
, manifestJSON :: C8L.ByteString
|
||||
-- | The URI (even if the default public registry) of the docker
|
||||
-- registry.
|
||||
, dockerRegistry :: RegistryURI
|
||||
-- | An alternative name for the docker image provided in the
|
||||
-- output Nix `fetchdocker` derivation expressions. Not replacing
|
||||
-- @imageName@ but providing a method for declaring up-front a
|
||||
-- possibly cleaner or more intuitive name for use within Nix.
|
||||
, altImageName :: Maybe T.Text
|
||||
-- | An alternative name for the docker image in the generated nix
|
||||
-- build instructions.
|
||||
, altImageName :: Maybe Text
|
||||
} deriving (Show)
|
||||
|
||||
-- | Parse a 'C8.ByteString' into a 'Hash.SHA256'.
|
||||
--
|
||||
-- A digest value, as seen in the docker registry manifest, is the
|
||||
-- hexadecimal encoding of a hashing function's digest with the
|
||||
-- hashing function identifier prefixed onto the string. At this time
|
||||
-- the only prefix used is @sha256:@.
|
||||
toDigest :: C8.ByteString -> Maybe (Hash.Digest Hash.SHA256)
|
||||
toDigest = from . C8.break (== ':')
|
||||
where
|
||||
from ("sha256", r) = either (const Nothing) Hash.digestFromByteString . toBytes $ C8.tail r
|
||||
from (_, _) = Nothing
|
||||
|
||||
toBytes :: C8.ByteString -> Either String BA.Bytes
|
||||
toBytes = BA.convertFromBase BA.Base16
|
||||
|
||||
-- | Show a hexadecimal encoded 'SHA256' hash digest and prefix
|
||||
-- @sha256:@ to it.
|
||||
showSHA :: Hash.Digest Hash.SHA256 -> String
|
||||
showSHA = ("sha256:" ++) . show
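As a quick illustration of the two helpers above (a hypothetical doctest-style sketch, not part of this commit; the digest value is just an example taken from the manifest sample below):

--   >>> let d = toDigest "sha256:3e83c23dba6a16cd936a3dc044df71b26706c5a4c28181bc3ca4a4af9f5f38ee"
--   >>> fmap showSHA d
--   Just "sha256:3e83c23dba6a16cd936a3dc044df71b26706c5a4c28181bc3ca4a4af9f5f38ee"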
|
||||
|
||||
-- Pretty-printed example of the `manifest.json` file.
|
||||
{-
|
||||
[
|
||||
{
|
||||
"Config": "3e83c23dba6a16cd936a3dc044df71b26706c5a4c28181bc3ca4a4af9f5f38ee.json",
|
||||
"Layers": [
|
||||
"10a267c67f423630f3afe5e04bbbc93d578861ddcc54283526222f3ad5e895b9.tar"
|
||||
],
|
||||
"RepoTags": [
|
||||
"library/debian:jessie"
|
||||
]
|
||||
}
|
||||
]
|
||||
-}
|
||||
|
||||
-- Pretty-printed example of the `repositories` json file.
|
||||
{-
|
||||
{
|
||||
"library/debian": {
|
||||
"jessie": "10a267c67f423630f3afe5e04bbbc93d578861ddcc54283526222f3ad5e895b9"
|
||||
}
|
||||
}
|
||||
-}
|
||||
|
||||
-- | A layer hash digest from a docker image's config JSON. This hash
|
||||
-- is different from those found in the image's manifest JSON.
|
||||
type RefLayer = Text
|
||||
|
||||
-- | A 'String' representing the full repository tag, e.g: @library/debian@.
|
||||
type RepoTag = String
|
||||
|
||||
-- | A v1.2 docker image manifest.
|
||||
data ImageManifest = ImageManifest
|
||||
{ -- | 'FilePath' within the image archive of the image's config
|
||||
-- JSON
|
||||
config :: FilePath
|
||||
-- | List of image repository tags
|
||||
, repoTags :: [Text]
|
||||
-- | List of layers within the image archive named by their hash
|
||||
-- digest and with a @.tar@ extension
|
||||
, layers :: [FilePath]
|
||||
} deriving (Show, Eq)
|
||||
|
||||
-- | A map of 'ImageRepo's. The repository names are the top-level
|
||||
-- keys and their value is a map whose keys are the tags of the
|
||||
-- repository with the hash-value of the layer that tag references.
|
||||
data ImageRepositories = ImageRepositories [ImageRepo]
|
||||
deriving (Show, Eq)
|
||||
|
||||
data ImageRepo = ImageRepo
|
||||
{ -- | Repository tag
|
||||
repo :: Text
|
||||
-- | 'HashMap' of tags to the top-most layer associated with that tag
|
||||
, tags :: H.HashMap Text Text
|
||||
} deriving (Show, Eq)
|
||||
|
||||
$(deriveJSON stdOpts{ fieldLabelModifier = upperFirst } ''ImageManifest)
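(With upperFirst as the field label modifier, the record fields config, repoTags, and layers are (de)serialized as the JSON keys "Config", "RepoTags", and "Layers", matching the manifest.json example above.)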
|
||||
|
||||
|
||||
|
||||
instance ToJSON ImageRepositories where
|
||||
toJSON (ImageRepositories r) =
|
||||
Object . H.unions $ [i | o@(Object i) <- (fmap toJSON r), isObject o]
|
||||
where
|
||||
isObject (Object _) = True
|
||||
isObject _ = False
|
||||
|
||||
instance ToJSON ImageRepo where
|
||||
toJSON (ImageRepo r t) = object [ r .= toJSON t ]
|
||||
|
||||
instance FromJSON ImageRepositories where
|
||||
parseJSON (Object v) = ImageRepositories <$> (mapM buildRepo $ H.toList v)
|
||||
where
|
||||
buildRepo (k,v') = ImageRepo k <$> parseJSON v'
|
||||
parseJSON v = typeMismatch "ImageRepositories" v
|
||||
|
@ -1,249 +0,0 @@
|
||||
{-# LANGUAGE LambdaCase #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
{-# LANGUAGE RecordWildCards #-}
|
||||
{-# LANGUAGE ViewPatterns #-}
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
-- |
|
||||
-- Module : Data.Docker.Image.V1.Layer
|
||||
-- Copyright : (C) 2016 Awake Networks
|
||||
-- License : Apache-2.0
|
||||
-- Maintainer : Awake Networks <opensource@awakenetworks.com>
|
||||
-- Stability : stable
|
||||
--
|
||||
-- Many of these functions are named after their equivalent functions
|
||||
-- in the docker Golang source code.
|
||||
--
|
||||
-- <https://github.com/docker/docker/blob/6e8a2cd29113896acfc3f97a43dd27f751f2f519/layer/layer.go layer.go>
|
||||
----------------------------------------------------------------------------
|
||||
|
||||
module Data.Docker.Image.V1.Layer where
|
||||
|
||||
import Control.Monad
|
||||
import Data.Aeson
|
||||
import qualified Data.ByteString.Lazy.Char8 as CL8
|
||||
import Data.Coerce
|
||||
import Data.Docker.Image.V1.Types
|
||||
import Data.Foldable
|
||||
import qualified Data.HashMap.Strict as H
|
||||
import Data.Monoid
|
||||
import Data.Sequence as Seq
|
||||
import Data.Sequence.Lens
|
||||
import qualified Data.Text as T
|
||||
|
||||
import Lib
|
||||
|
||||
type Parent = ChainID
|
||||
type TopLayerJSON = Data.Aeson.Object
|
||||
|
||||
-- | Produce a @ChainID@ using a sequence of layer @DiffIDs@.
|
||||
--
|
||||
-- <https://github.com/docker/docker/blob/6e8a2cd29113896acfc3f97a43dd27f751f2f519/layer/layer.go#L239 layer.CreateChainID>
|
||||
createChainID :: Seq DiffID -- ^ A sequence of layer @DiffID@s, (usually) fetched from the image's config JSON.
|
||||
-> Maybe ChainID
|
||||
createChainID = createChainIDFromParent Nothing
|
||||
|
||||
-- | Produce a @ChainID@ given the @ChainID@ of a parent layer and a
|
||||
-- sequence of layer @DiffIDs@.
|
||||
--
|
||||
-- <https://github.com/docker/docker/blob/6e8a2cd29113896acfc3f97a43dd27f751f2f519/layer/layer.go#L243 layer.createChainIDFromParent>
|
||||
createChainIDFromParent :: Maybe Parent -- ^ Previous (parent) @ChainID@ in the sequence used to produce the next @ChainID@.
|
||||
-> Seq DiffID -- ^ A sequence of layer @DiffID@s, (usually) fetched from the image's config JSON.
|
||||
-> Maybe ChainID
|
||||
createChainIDFromParent parent (Seq.viewl -> EmptyL) = parent
|
||||
createChainIDFromParent parent (Seq.viewl -> h :< rest) =
|
||||
createChainIDFromParent (maybe root layer parent) rest
|
||||
where
|
||||
root = Just $ coerce h
|
||||
layer = Just . flip chainDigest h
|
||||
|
||||
createChainIDFromParent parent _ = parent
|
||||
|
||||
-- | Produce a @ChainID@ given a parent @ChainID@ and a layer
|
||||
-- @DiffID@.
|
||||
chainDigest :: Parent -- ^ Parent @ChainID@ used to produce a child @ChainID@.
|
||||
-> DiffID -- ^ Layer @DiffID@.
|
||||
-> ChainID
|
||||
chainDigest (show -> c) (show -> d) = ChainID .
|
||||
Lib.sha256 . CL8.pack $ concat [c, " ", d]
|
||||
|
||||
-- | Produce a sequence of @ChainID@s from a sequence of layer
|
||||
-- @DiffID@s.
|
||||
--
|
||||
-- <https://github.com/docker/docker/blob/b826bebda0cff2cc2d3083b954c810d2889eefe5/image/tarexport/save.go#L242 save.saveImage>
|
||||
chainIDSequence :: Seq DiffID
|
||||
-> Seq (Maybe ChainID)
|
||||
chainIDSequence diffIDSeq = mapWithIndex chainIDSlice diffIDSeq
|
||||
where
|
||||
chainIDSlice (succ -> i) _ =
|
||||
createChainID $ seqOf (slicedTo i) diffIDSeq
|
||||
|
||||
-- | Produce a sequence of unwrapped Just's from a sequence of
|
||||
-- Maybe's.
|
||||
squishMaybe :: MonadPlus m => m (Maybe a) -> m a
|
||||
squishMaybe = join . fmap adapt
|
||||
where
|
||||
adapt Nothing = mzero
|
||||
adapt (Just x) = return x
|
||||
|
||||
-- | Produce layer content ID hashes given an empty JSON config with
|
||||
-- the layer's @ChainID@ injected as the value of the `layer_id` key
|
||||
-- and, if not the base layer, the previous @ContentID@ injected as
|
||||
-- the value of the `parent` key.
|
||||
--
|
||||
-- The JSON that is encoded *must* be in the canonical format
|
||||
-- specified by Docker, please see @Lib.encodeCanonical@ for a
|
||||
-- convenience function to encode an @Aeson.Value@ satisfying those
|
||||
-- rules.
|
||||
contentIDSequence :: Seq ChainID -- ^ A sequence of @ChainID@s, please see @chainIDSequence@.
|
||||
-> TopLayerJSON -- ^ Config JSON paired with the top-most layer of the image.
|
||||
-> Seq ContentID
|
||||
contentIDSequence cids fj = foldl' (contentIDFold fj $ Seq.length cids) Seq.empty cids
|
||||
|
||||
-- | A folding function given to @foldl'@. This function computes the
|
||||
-- @ContentID@'s for each layer using the last computed @ContentID@ as
|
||||
-- the parent @ContentID@ for each iteration.
|
||||
--
|
||||
-- The first two arguments are closed over before being fed to
|
||||
-- @foldl'@ producing a partial function that satisfies @foldl'@'s
|
||||
-- first argument type signature.
|
||||
contentIDFold :: TopLayerJSON -- ^ Config JSON to be hashed with the top-most layer of the image.
|
||||
-> Int -- ^ Length of the @ChainID@ sequence being folded over.
|
||||
-> Seq ContentID -- ^ The sequence of @ContentID@s accumulated.
|
||||
-> ChainID -- ^ The @ChainID@ for producing a @ContentID@.
|
||||
-> Seq ContentID
|
||||
contentIDFold _ _ acc@(Seq.viewr -> EmptyR) chainid =
|
||||
acc |> hashContent Nothing chainid emptyLayerJSON
|
||||
contentIDFold topLayerJSON ln acc@(Seq.viewr -> _ :> parent) chainid =
|
||||
acc |> hashedContentID
|
||||
where
|
||||
-- Check to see if we're at the end of the sequence we're folding
|
||||
-- over, if so then hash the content using the top-layer config
|
||||
-- JSON instead of the empty JSON
|
||||
hashedContentID =
|
||||
if ln == (succ $ Seq.length acc)
|
||||
then hashContent (Just parent) chainid topLayerJSON
|
||||
else hashContent (Just parent) chainid emptyLayerJSON
|
||||
|
||||
contentIDFold _ _ acc chainid =
|
||||
acc |> hashContent Nothing chainid emptyLayerJSON
|
||||
|
||||
-- | Produce a @ContentID@, given a parent and a @ChainID@, builds the
|
||||
-- empty JSON object with those two values and encodes it following
|
||||
-- the canonical JSON rules.
|
||||
hashContent :: Maybe ContentID -- ^ Parent @ContentID@ for injection into the hashing JSON.
|
||||
-> ChainID -- ^ @ChainID@ to be hashed with the hashing JSON.
|
||||
-> Data.Aeson.Object -- ^ Aeson AST to be canonically encoded; this can be either the ephemeral JSON or the config JSON.
|
||||
-> ContentID
|
||||
hashContent p c jsn = mk $ ephemeralHashableLayerJSON p c jsn
|
||||
where
|
||||
mk = ContentID . Lib.sha256 . Lib.encodeCanonical
|
||||
|
||||
-- | @emptyLayerJSON@ produces "empty" JSON for use in layer content
|
||||
-- hashing.
|
||||
--
|
||||
-- The Aeson instances for @ContentID@, @DiffID@, and @ChainID@ will
|
||||
-- correctly output a hex serialization of the SHA256 digest and
|
||||
-- prefix it with "sha256:", which is necessary to correctly hash the
|
||||
-- layer config in the same way that Docker's Golang code does it.
|
||||
--
|
||||
-- NB: I've manually assembled this in the "canonical order" it needs
|
||||
-- to be in, in order to correctly hash the JSON string. There is also
|
||||
-- a custom Aeson pretty printing function that serializes ADTs into
|
||||
-- the canonical form and should make this function moot once an
|
||||
-- appropriate ADT is in place.
|
||||
--
|
||||
-- TODO: codify this as an ADT to get rid of this manual construction
|
||||
-- and make things clearer. For now, the manually constructed one is
|
||||
-- fine (to get things working).
|
||||
emptyLayerJSON :: Data.Aeson.Object
|
||||
emptyLayerJSON = H.fromList
|
||||
[ "container_config" .= object
|
||||
[ "Hostname" .= ("" :: String)
|
||||
, "Domainname" .= ("" :: String) -- NB: this one isn't cased like the others :(
|
||||
, "User" .= ("" :: String)
|
||||
, "AttachStdin" .= False
|
||||
, "AttachStdout" .= False
|
||||
, "AttachStderr" .= False
|
||||
, "Tty" .= False
|
||||
, "OpenStdin" .= False
|
||||
, "StdinOnce" .= False
|
||||
, "Env" .= (Nothing :: Maybe String)
|
||||
, "Cmd" .= (Nothing :: Maybe String)
|
||||
, "Image" .= ("" :: String)
|
||||
|
||||
-- This is a object with significant keys and empty values
|
||||
-- (don't ask me why)
|
||||
, "Volumes" .= (Nothing :: Maybe Data.Aeson.Value)
|
||||
, "WorkingDir" .= ("" :: String)
|
||||
, "Entrypoint" .= (Nothing :: Maybe String)
|
||||
, "OnBuild" .= (Nothing :: Maybe String)
|
||||
, "Labels" .= (Nothing :: Maybe [String])
|
||||
]
|
||||
|
||||
-- This is the "canonical" empty timestamp
|
||||
, "created" .= emptyTimeStamp
|
||||
]
|
||||
|
||||
-- | Produce an "empty" JSON object given a parent and a
|
||||
-- @ChainID@. This is used internally to produce the @ContentID@ hash
|
||||
-- for a given layer.
|
||||
ephemeralHashableLayerJSON :: Maybe ContentID -- ^ Parent @ContentID@, if Nothing, will not be included in the Aeson AST.
|
||||
-> ChainID -- ^ @ChainID@ of the layer we're producing the @ContentID@ for.
|
||||
-> Data.Aeson.Object -- ^ Aeson AST we want to inject the parent @ContentID@ and layer @ChainID@ into.
|
||||
-> Data.Aeson.Value
|
||||
ephemeralHashableLayerJSON parent layerid layerJSON =
|
||||
Object $ layerJSON `H.union` H.fromList
|
||||
([ "layer_id" .= layerid ] <> (maybeSingletonParent parent))
|
||||
|
||||
-- | Produce a layer JSON object given a parent, a @ContentID@, and an
|
||||
-- Aeson Value Object. This function is different from
|
||||
-- @ephemeralHashableLayerJSON@ in that its output is (later on)
|
||||
-- written to the filesystem alongside the `layer.tar` file within the
|
||||
-- directory named after the @ContentID@ hash.
|
||||
permanentLayerJSON :: Maybe ContentID
|
||||
-> ContentID
|
||||
-> Data.Aeson.Object
|
||||
-> Data.Aeson.Value
|
||||
permanentLayerJSON parent layerContentId layerJSON =
|
||||
Object $ layerJSON `H.union` H.fromList
|
||||
([ "id" .= (mkPermHash layerContentId) ] <> maybeSingletonParent (mkPermHash <$> parent))
|
||||
where
|
||||
mkPermHash = Lib.stripHashId . T.pack . show
|
||||
|
||||
-- TODO: this should be parsed into an ADT, transformed algebraically
|
||||
-- into what it should be, then re-encoded; instead of performing
|
||||
-- Map-based operations on the AST. This was the quicker option though
|
||||
-- for now; need to get something working first.
|
||||
imageConfig2LayerConfig :: Data.Aeson.Object
|
||||
-> Data.Aeson.Object
|
||||
imageConfig2LayerConfig = H.filterWithKey keyWhitelist
|
||||
where
|
||||
keyWhitelist k _ = k `elem`
|
||||
[ "container"
|
||||
, "container_config"
|
||||
, "docker_version"
|
||||
, "config"
|
||||
, "architecture"
|
||||
, "os"
|
||||
]
|
||||
|
||||
-- | Produce mempty if the parent is Nothing; if the parent is @Just
|
||||
-- ContentID@ then it returns a singleton list with the expected
|
||||
-- @Data.Aeson.Pair@ construction for the empty layer JSON.
|
||||
--
|
||||
-- The input argument is parameterized because the permanent JSON
|
||||
-- config objects store hashes with the "sha256:" prefix stripped, but
|
||||
-- the ephemeral JSON objects used to produce the Content ID hashes
|
||||
-- want the "sha256:" prefix to be present!
|
||||
maybeSingletonParent :: ToJSON a
|
||||
=> Maybe a
|
||||
-> [(T.Text, Data.Aeson.Value)]
|
||||
maybeSingletonParent = maybe mempty (singletonList . ("parent" .=))
|
||||
where
|
||||
-- Alternatively - singleton v = [v]
|
||||
singletonList = (: [])
|
||||
|
||||
-- | Produce the string "0001-01-01T00:00:00Z".
|
||||
emptyTimeStamp :: String
|
||||
emptyTimeStamp = "0001-01-01T00:00:00Z"
|
@ -1,108 +0,0 @@
|
||||
{-# LANGUAGE FlexibleContexts #-}
|
||||
{-# LANGUAGE LambdaCase #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
{-# LANGUAGE RecordWildCards #-}
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
-- |
|
||||
-- Module : Data.Docker.Image.V1.Types
|
||||
-- Copyright : (C) 2016 Awake Networks
|
||||
-- License : Apache-2.0
|
||||
-- Maintainer : Awake Networks <opensource@awakenetworks.com>
|
||||
-- Stability : stable
|
||||
----------------------------------------------------------------------------
|
||||
|
||||
module Data.Docker.Image.V1.Types where
|
||||
|
||||
import qualified Crypto.Hash as Hash
|
||||
import Data.Aeson
|
||||
import Data.Aeson.Types
|
||||
import qualified Data.ByteArray as BA
|
||||
import qualified Data.ByteArray.Encoding as BA
|
||||
import qualified Data.ByteString.Char8 as C8
|
||||
import qualified Data.Text as T
|
||||
import Data.Text.Encoding (encodeUtf8)
|
||||
|
||||
-- | Attempt to parse a @C8.ByteString@ into a @Hash.Digest
|
||||
-- Hash.SHA256@.
|
||||
--
|
||||
-- A @Digest@ in Docker Golang-code parlance is the string hexadecimal
|
||||
-- representation of a hashing function's digest with the hashing
|
||||
-- function identifier prefixed onto the string. Right now they only
|
||||
-- use SHA256 everywhere and also don't really do anything to
|
||||
-- parameterize it.
|
||||
--
|
||||
-- There is a custom Show instance for this newtype to output a string
|
||||
-- representation of the digest prefixed by its hashing function
|
||||
-- identifier.
|
||||
toDigest :: C8.ByteString -> Maybe (Hash.Digest Hash.SHA256)
|
||||
toDigest = from . C8.break (== ':')
|
||||
where
|
||||
from ("sha256", r) = either (const Nothing) Hash.digestFromByteString . toBytes $ C8.tail r
|
||||
from (_, _) = Nothing
|
||||
|
||||
toBytes :: C8.ByteString -> Either String BA.Bytes
|
||||
toBytes = BA.convertFromBase BA.Base16
|
||||
|
||||
-- | A special kind of SHA256 hash digest identifying a layer by its
|
||||
-- *content*. This value is a hash of an empty, canonicalized JSON
|
||||
-- string with a "layer_id" (which is actually the layer's @ChainID@)
|
||||
-- and possibly a parent ID (which is the previous-layer-in-sequence
|
||||
-- @ContentID@).
|
||||
newtype ContentID = ContentID (Hash.Digest Hash.SHA256)
|
||||
deriving (Eq)
|
||||
|
||||
-- | A special kind of SHA256 digest identifying a specific sequence
|
||||
-- of layers.
|
||||
--
|
||||
-- <https://github.com/docker/docker/blob/6e8a2cd29113896acfc3f97a43dd27f751f2f519/layer/layer.go#L60 layer.ChainID>
|
||||
newtype ChainID = ChainID (Hash.Digest Hash.SHA256)
|
||||
deriving (Eq)
|
||||
|
||||
-- | A special kind of a SHA256 digest identifying a layer by the
|
||||
-- sha256 sum of the uncompressed layer tarball. "Diff" in this
|
||||
-- context refers to the root filesystem contents of the tarball
|
||||
-- identified by @DiffID@ representing the difference from the
|
||||
-- previous layer.
|
||||
--
|
||||
-- <https://github.com/docker/docker/blob/6e8a2cd29113896acfc3f97a43dd27f751f2f519/layer/layer.go#L68 layer.DiffID>
|
||||
newtype DiffID = DiffID (Hash.Digest Hash.SHA256)
|
||||
deriving (Eq)
|
||||
|
||||
-- | Show a hexadecimal encoded SHA256 hash digest and prefix
|
||||
-- "sha256:" to it.
|
||||
showSHA :: Hash.Digest Hash.SHA256 -> String
|
||||
showSHA = ("sha256:" ++) . show
|
||||
|
||||
instance Show ContentID where
|
||||
show (ContentID d) = showSHA d
|
||||
instance Show ChainID where
|
||||
show (ChainID d) = showSHA d
|
||||
instance Show DiffID where
|
||||
show (DiffID d) = showSHA d
|
||||
|
||||
instance ToJSON ContentID where
|
||||
toJSON v = String . T.pack $ show v
|
||||
instance ToJSON ChainID where
|
||||
toJSON v = String . T.pack $ show v
|
||||
instance ToJSON DiffID where
|
||||
toJSON v = String . T.pack $ show v
|
||||
|
||||
instance FromJSON ContentID where
|
||||
parseJSON o@(String v) =
|
||||
case toDigest $ encodeUtf8 v of
|
||||
Just v' -> return $ ContentID v'
|
||||
Nothing -> typeMismatch "SHA256 Digest" o
|
||||
parseJSON inv = typeMismatch "SHA256 Digest" inv
|
||||
instance FromJSON ChainID where
|
||||
parseJSON o@(String v) =
|
||||
case toDigest $ encodeUtf8 v of
|
||||
Just v' -> return $ ChainID v'
|
||||
Nothing -> typeMismatch "SHA256 Digest" o
|
||||
parseJSON inv = typeMismatch "SHA256 Digest" inv
|
||||
instance FromJSON DiffID where
|
||||
parseJSON o@(String v) =
|
||||
case toDigest $ encodeUtf8 v of
|
||||
Just v' -> return $ DiffID v'
|
||||
Nothing -> typeMismatch "SHA256 Digest" o
|
||||
parseJSON inv = typeMismatch "SHA256 Digest" inv
|
@ -1,108 +0,0 @@
|
||||
{-# LANGUAGE FlexibleContexts #-}
|
||||
{-# LANGUAGE LambdaCase #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
{-# LANGUAGE PartialTypeSignatures #-}
|
||||
{-# LANGUAGE RecordWildCards #-}
|
||||
{-# LANGUAGE TemplateHaskell #-}
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
-- |
|
||||
-- Module : Data.Docker.Image.V1_2.Types
|
||||
-- Copyright : (C) 2016 Awake Networks
|
||||
-- License : Apache-2.0
|
||||
-- Maintainer : Awake Networks <opensource@awakenetworks.com>
|
||||
-- Stability : stable
|
||||
--
|
||||
-- The types in this module are used to describe two specific pieces
|
||||
-- of JSON within the v1.2 Docker Image spec: @manifest.json@ and
|
||||
-- @repositories@.
|
||||
----------------------------------------------------------------------------
|
||||
|
||||
module Data.Docker.Image.V1_2.Types where
|
||||
|
||||
import Data.Aeson
|
||||
import Data.Aeson.TH
|
||||
import Data.Aeson.Types
|
||||
import Data.HashMap.Strict as H
|
||||
import qualified Data.Text as T
|
||||
|
||||
import Data.Docker.Image.AesonHelpers
|
||||
import Lib
|
||||
|
||||
----------------------------------------------------------------------------
|
||||
--
|
||||
|
||||
-- Pretty-printed example of the `manifest.json` file.
|
||||
{-
|
||||
[
|
||||
{
|
||||
"Config": "3e83c23dba6a16cd936a3dc044df71b26706c5a4c28181bc3ca4a4af9f5f38ee.json",
|
||||
"Layers": [
|
||||
"10a267c67f423630f3afe5e04bbbc93d578861ddcc54283526222f3ad5e895b9.tar"
|
||||
],
|
||||
"RepoTags": [
|
||||
"library/debian:jessie"
|
||||
]
|
||||
}
|
||||
]
|
||||
-}
|
||||
|
||||
-- Pretty-printed example of the `repositories` json file.
|
||||
{-
|
||||
{
|
||||
"library/debian": {
|
||||
"jessie": "10a267c67f423630f3afe5e04bbbc93d578861ddcc54283526222f3ad5e895b9"
|
||||
}
|
||||
}
|
||||
-}
|
||||
|
||||
-- | A 'Text' representing a layer hash digest sourced from a docker
|
||||
-- image's config JSON (different from the image's manifest JSON).
|
||||
type RefLayer = T.Text
|
||||
|
||||
-- | A 'String' representing the full repository tag, e.g: @library/debian@.
|
||||
type RepoTag = String
|
||||
|
||||
-- | Represents a v1.2 Docker Image manifest.
|
||||
data ImageManifest = ImageManifest
|
||||
{ -- | 'FilePath' within the image archive of the image's config
|
||||
-- JSON
|
||||
config :: FilePath
|
||||
-- | List of image repository tags
|
||||
, repoTags :: [T.Text]
|
||||
-- | List of layers within the image archive named by their hash
|
||||
-- digest and with the tar extension appended
|
||||
, layers :: [FilePath]
|
||||
} deriving (Show, Eq)
|
||||
|
||||
-- | Represents an object of 'ImageRepo's. The repository names are the
|
||||
-- top-level keys and their value is an object who's keys are the tags
|
||||
-- of the repository with the hash-value of the layer that tag
|
||||
-- references.
|
||||
data ImageRepositories = ImageRepositories [ImageRepo]
|
||||
deriving (Show, Eq)
|
||||
|
||||
data ImageRepo = ImageRepo
|
||||
{ -- | Repository tag
|
||||
repo :: T.Text
|
||||
-- | 'HashMap' of tags to the top-most layer associated with that tag
|
||||
, tags :: H.HashMap T.Text T.Text
|
||||
} deriving (Show, Eq)
|
||||
|
||||
$(deriveJSON stdOpts{ fieldLabelModifier = upperFirst } ''ImageManifest)
|
||||
|
||||
instance ToJSON ImageRepositories where
|
||||
toJSON (ImageRepositories r) =
|
||||
Object . H.unions $ [i | o@(Object i) <- (fmap toJSON r), isObject o]
|
||||
where
|
||||
isObject (Object _) = True
|
||||
isObject _ = False
|
||||
|
||||
instance ToJSON ImageRepo where
|
||||
toJSON (ImageRepo r t) = object [ r .= toJSON t ]
|
||||
|
||||
instance FromJSON ImageRepositories where
|
||||
parseJSON (Object v) = ImageRepositories <$> (mapM buildRepo $ H.toList v)
|
||||
where
|
||||
buildRepo (k,v') = ImageRepo k <$> parseJSON v'
|
||||
parseJSON v = typeMismatch "ImageRepositories" v
|
@ -11,11 +11,11 @@
|
||||
-- Stability : stable
|
||||
--
|
||||
-- This module only re-exports Nix modules providing Docker-specific
|
||||
-- functionality as it pertains to Nix.
|
||||
-- functionality as it pertains to the generation of Nix expressions.
|
||||
----------------------------------------------------------------------------
|
||||
|
||||
module Data.Docker.Nix
|
||||
( -- * Generating `fetchdocker` Nix Derivation Expressions
|
||||
( -- * Generate nix build instructions for a docker image
|
||||
module Data.Docker.Nix.FetchDocker
|
||||
) where
|
||||
|
||||
|
@ -17,23 +17,24 @@ module Data.Docker.Nix.FetchDocker where
|
||||
|
||||
import Control.Lens
|
||||
import Control.Monad
|
||||
import Control.Monad.Except as Except
|
||||
import Control.Monad.Except as Except
|
||||
import Data.Aeson.Lens
|
||||
import qualified Data.Bifunctor as Bifunctor
|
||||
import qualified Data.Bifunctor as Bifunctor
|
||||
import Data.Coerce
|
||||
import Data.Fix
|
||||
import Data.Maybe
|
||||
import Data.Monoid
|
||||
import qualified Data.Text as T
|
||||
import Data.Text.Encoding (decodeUtf8')
|
||||
import Data.Text (Text)
|
||||
import qualified Data.Text as Text
|
||||
import Data.Text.Encoding (decodeUtf8')
|
||||
import Data.Text.Encoding.Error
|
||||
import Nix.Expr
|
||||
import URI.ByteString
|
||||
|
||||
import Data.Docker.Image.Types
|
||||
import Data.Docker.Nix.Lib as Nix.Lib
|
||||
import Data.Docker.Nix.Lib as Nix.Lib
|
||||
import Lib
|
||||
import Network.Wreq.Docker.Registry.V2 (pluckLayersFrom)
|
||||
import Network.Wreq.Docker.Registry (pluckLayersFrom)
|
||||
import Types
|
||||
import Types.Exceptions
|
||||
import Types.ImageTag
|
||||
@ -60,22 +61,24 @@ fetchdocker rec {
|
||||
}
|
||||
-}
|
||||
|
||||
-- | @fetchdocker@ derivation name.
|
||||
constFetchdocker :: T.Text
|
||||
-- | @fetchdocker@ function name.
|
||||
constFetchdocker :: Text
|
||||
constFetchdocker = "fetchdocker"
|
||||
|
||||
-- | @fetchDockerConfig@ derivation name.
|
||||
constFetchDockerConfig :: T.Text
|
||||
-- | @fetchDockerConfig@ function name.
|
||||
constFetchDockerConfig :: Text
|
||||
constFetchDockerConfig = "fetchDockerConfig"
|
||||
|
||||
-- | @fetchDockerLayer@ derivation name.
|
||||
constFetchDockerLayer :: T.Text
|
||||
-- | @fetchDockerLayer@ function name.
|
||||
constFetchDockerLayer :: Text
|
||||
constFetchDockerLayer = "fetchDockerLayer"
|
||||
|
||||
-- | Generate a Nix expression AST from a @HockerImageMeta@
|
||||
-- record. This function crucially checks that the supplied manifest
|
||||
-- JSON contains a key in the top-level object describing what version
|
||||
-- of the manifest we have.
|
||||
-- record.
|
||||
--
|
||||
-- This function checks that the supplied manifest JSON contains a key
|
||||
-- in the top-level object describing what version of the manifest we
|
||||
-- have.
|
||||
generate :: HockerImageMeta -> IO (Either HockerException NExpr)
|
||||
generate dim@HockerImageMeta{..} = runExceptT $
|
||||
case (manifestJSON ^? key "schemaVersion" . _Integer) of
|
||||
@ -87,7 +90,7 @@ generate dim@HockerImageMeta{..} = runExceptT $
|
||||
|
||||
ExceptT (pure $ generateFetchDockerExpr dim configDigest layerDigests)
|
||||
Just v ->
|
||||
throwError $ HockerException ("Expected: 2 but got: " <> (show v)) Nothing Nothing
|
||||
throwError $ HockerException ("Expected a version 2 manifest but got version " <> (show v)) Nothing Nothing
|
||||
Nothing ->
|
||||
throwError $ HockerException "No key 'schemaVersion' in JSON object" Nothing Nothing
|
||||
|
||||
@ -102,28 +105,27 @@ generate dim@HockerImageMeta{..} = runExceptT $
|
||||
{-| Generate a top-level Nix Expression AST from a 'HockerImageMeta'
|
||||
record, a config digest, and a list of layer digests.
|
||||
|
||||
The generated AST, pretty-printed, may look similar to the following:
|
||||
@
|
||||
{ fetchdocker, fetchDockerConfig, fetchDockerLayer }:
|
||||
fetchdocker rec {
|
||||
name = "debian";
|
||||
registry = "https://registry-1.docker.io/v2/";
|
||||
repository = "library";
|
||||
imageName = "debian";
|
||||
tag = "latest";
|
||||
imageConfig = fetchDockerConfig {
|
||||
inherit registry repository imageName tag;
|
||||
sha256 = "1viqbygsz9547jy830f2lk2hcrxjf7gl9h1xda9ws5kap8yw50ry";
|
||||
};
|
||||
imageLayers = let
|
||||
layer0 = fetchDockerLayer {
|
||||
inherit registry repository imageName;
|
||||
layerDigest = "10a267c67f423630f3afe5e04bbbc93d578861ddcc54283526222f3ad5e895b9";
|
||||
sha256 = "1fcmx3aklbr24qsjhm6cvmhqhmrxr6xlpq75mzrk0dj2gz36g8hh";
|
||||
};
|
||||
in [ layer0 ];
|
||||
}
|
||||
@
|
||||
The generated AST, pretty printed, may look similar to the following:
|
||||
|
||||
> { fetchdocker, fetchDockerConfig, fetchDockerLayer }:
|
||||
> fetchdocker rec {
|
||||
> name = "debian";
|
||||
> registry = "https://registry-1.docker.io/v2/";
|
||||
> repository = "library";
|
||||
> imageName = "debian";
|
||||
> tag = "latest";
|
||||
> imageConfig = fetchDockerConfig {
|
||||
> inherit registry repository imageName tag;
|
||||
> sha256 = "1viqbygsz9547jy830f2lk2hcrxjf7gl9h1xda9ws5kap8yw50ry";
|
||||
> };
|
||||
> imageLayers = let
|
||||
> layer0 = fetchDockerLayer {
|
||||
> inherit registry repository imageName;
|
||||
> layerDigest = "10a267c67f423630f3afe5e04bbbc93d578861ddcc54283526222f3ad5e895b9";
|
||||
> sha256 = "1fcmx3aklbr24qsjhm6cvmhqhmrxr6xlpq75mzrk0dj2gz36g8hh";
|
||||
> };
|
||||
> in [ layer0 ];
|
||||
> }
|
||||
-}
|
||||
generateFetchDockerExpr :: HockerImageMeta -> ConfigDigest -> [(Base16Digest, Base32Digest)] -> Either HockerException NExpr
|
||||
generateFetchDockerExpr dim@HockerImageMeta{..} configDigest layerDigests = do
|
||||
@ -132,7 +134,7 @@ generateFetchDockerExpr dim@HockerImageMeta{..} configDigest layerDigests = do
|
||||
, StaticKey "repository"
|
||||
, StaticKey "imageName"
|
||||
]
|
||||
let genLayerId i = mkSym . T.pack $ "layer" <> show i
|
||||
let genLayerId i = mkSym . Text.pack $ "layer" <> show i
|
||||
let fetchconfig = mkFetchDockerConfig (inherit $ ((StaticKey "tag"):commonInherits)) configDigest
|
||||
fetchlayers =
|
||||
mkLets
|
||||
@ -148,8 +150,8 @@ generateFetchDockerExpr dim@HockerImageMeta{..} configDigest layerDigests = do
|
||||
]) fetchDockerExpr)
|
||||
|
||||
-- | Generate a @fetchdocker { ... }@ function call and argument
|
||||
-- attribute set. Please see 'generateNixExprs' documentation for an
|
||||
-- example of full output.
|
||||
-- attribute set. Please see 'generateFetchDockerExpr' documentation
|
||||
-- for an example of full output.
|
||||
mkFetchDocker :: HockerImageMeta -> NExpr -> NExpr -> Either HockerException NExpr
|
||||
mkFetchDocker HockerImageMeta{..} fetchconfig fetchlayers = do
|
||||
registry <- Bifunctor.first mkHockerException serializedRegistry
|
||||
@ -160,7 +162,7 @@ mkFetchDocker HockerImageMeta{..} fetchconfig fetchlayers = do
|
||||
, ("registry", mkStr registry)
|
||||
, ("repository", mkStr imageRepo)
|
||||
, ("imageName", mkStr imageName)
|
||||
, ("tag", mkStr (T.pack $ coerce imageTag))
|
||||
, ("tag", mkStr (Text.pack $ coerce imageTag))
|
||||
, ("imageConfig", fetchconfig)
|
||||
, ("imageLayers", fetchlayers)
|
||||
]))
|
||||
@ -173,18 +175,22 @@ mkFetchDocker HockerImageMeta{..} fetchconfig fetchlayers = do
|
||||
|
||||
|
||||
-- | Generate a @fetchDockerConfig { ... }@ function call and
|
||||
-- argument attrset. This function takes an argument for a list of
|
||||
-- static keys to inherit from the parent attribute set; it helps
|
||||
-- reduce the noise in the output expression.
|
||||
-- argument attrset.
|
||||
--
|
||||
-- This function takes an argument for a list of static keys to
|
||||
-- inherit from the parent attribute set; it helps reduce the noise in
|
||||
-- the output expression.
|
||||
mkFetchDockerConfig :: Binding NExpr -> Base32Digest -> NExpr
|
||||
mkFetchDockerConfig inherits (Base32Digest digest) =
|
||||
mkApp (mkSym constFetchDockerConfig)
|
||||
(Fix $ NSet [ inherits, "sha256" $= (mkStr digest) ])
|
||||
|
||||
-- | Generate a list of Nix expression ASTs representing
|
||||
-- @fetchDockerLayer { ... }@ function calls. This function takes
|
||||
-- an argument for a list of static keys to inherit from the parent
|
||||
-- attribute set; it helps reduce the noise in the output expression.
|
||||
-- @fetchDockerLayer { ... }@ function calls.
|
||||
--
|
||||
-- This function takes an argument for a list of static keys to
|
||||
-- inherit from the parent attribute set; it helps reduce the noise in
|
||||
-- the output expression.
|
||||
--
|
||||
-- NB: the hash digest tuple in the second argument is the base16
|
||||
-- encoded hash digest plucked from the image's manifest JSON and a
|
||||
@ -201,7 +207,7 @@ mkFetchDockerLayers :: Binding NExpr -> [(Base16Digest, Base32Digest)] -> [Bindi
|
||||
mkFetchDockerLayers inherits layerDigests =
|
||||
fmap mkFetchLayer $ Prelude.zip [0..(Prelude.length layerDigests)] layerDigests
|
||||
where
|
||||
mkLayerId i = T.pack $ "layer" <> show i
|
||||
mkLayerId i = Text.pack $ "layer" <> show i
|
||||
mkFetchLayer (i, ((Base16Digest d16), (Base32Digest d32))) =
|
||||
(mkLayerId i) $= mkApp (mkSym constFetchDockerLayer)
|
||||
(Fix $ NSet
|
||||
|
@ -15,43 +15,48 @@
|
||||
|
||||
module Data.Docker.Nix.Lib where
|
||||
|
||||
import Control.Foldl as Foldl
|
||||
import Control.Foldl as Foldl
|
||||
import Turtle
|
||||
import Control.Monad.Except as Except
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Text as Text
|
||||
|
||||
import Types
|
||||
import Types.Exceptions
|
||||
|
||||
-- | Convert a @Base16Digest@ to a @Base32Digest@ using the supplied
|
||||
-- `nix-hash` utility.
|
||||
-- | Convert a 'Base16Digest' to a 'Base32Digest' using the @nix-hash@
|
||||
-- utility.
|
||||
--
|
||||
-- NB: Nix implements its own custom base32 encoding function for
|
||||
-- hashes that is not compatible with other more standard and native
|
||||
-- implementations in Haskell. I opted to call out to `nix-hash`
|
||||
-- instead of re-implementing their algorithm here in Haskell because
|
||||
-- it's non-standard and may change, creating a maintenance headache
|
||||
-- and "surprise" behavior for users.
|
||||
-- implementations in Haskell. I opted to call out to @nix-hash@
|
||||
-- instead of re-implementing their algorithm because it's
|
||||
-- non-standard and may change, creating a maintenance headache and
|
||||
-- surprise behavior.
|
||||
toBase32Nix :: (MonadIO m, Except.MonadError HockerException m)
|
||||
=> Prelude.FilePath -- ^ Path to the `nix-hash` executable, see @Lib.findExec@.
|
||||
-> Base16Digest -- ^ @Base16@ hash digest to @Base32@ encode.
|
||||
=> Prelude.FilePath -- ^ Path to the @nix-hash@ executable, see 'Lib.findExec'
|
||||
-> Base16Digest -- ^ 'Base16Digest' to @base32@ encode
|
||||
-> m Base32Digest
|
||||
toBase32Nix nixhash (Base16Digest d16) =
|
||||
toBase32Nix nixhash (Base16Digest d16) = do
|
||||
let hockerExc m = HockerException m Nothing Nothing
|
||||
let convertDigest =
|
||||
inprocWithErr
|
||||
(Text.pack nixhash)
|
||||
[ "--type"
|
||||
, "sha256"
|
||||
, "--to-base32"
|
||||
, d16
|
||||
]
|
||||
Turtle.empty
|
||||
|
||||
Turtle.fold convertDigest Foldl.head >>= \case
|
||||
Nothing -> throwError $ HockerException "nothing was returned by `nix-hash', not even an error" Nothing Nothing
|
||||
Nothing ->
|
||||
throwError
|
||||
(HockerException
|
||||
"nothing was returned by `nix-hash', not even an error"
|
||||
Nothing
|
||||
Nothing)
|
||||
Just result ->
|
||||
either
|
||||
(throwError . hockerExc . T.unpack . lineToText)
|
||||
(throwError . hockerExc . Text.unpack . lineToText)
|
||||
(return . Base32Digest . lineToText)
|
||||
result
|
||||
where
|
||||
hockerExc m = HockerException m Nothing Nothing
|
||||
convertDigest =
|
||||
inprocWithErr
|
||||
(T.pack nixhash)
|
||||
[ "--type"
|
||||
, "sha256"
|
||||
, "--to-base32"
|
||||
, d16
|
||||
]
|
||||
Turtle.empty
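A hypothetical usage sketch for toBase32Nix, run inside hocker's error-handling monad (the digest is the example layer digest used elsewhere in this commit; the exact base32 output is whatever nix-hash prints):

--   nixhash <- Lib.findExec "nix-hash"
--   b32     <- toBase32Nix nixhash
--                (Base16Digest "10a267c67f423630f3afe5e04bbbc93d578861ddcc54283526222f3ad5e895b9")
--   -- shells out to: nix-hash --type sha256 --to-base32 10a267c67f42…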
|
||||
|
src/Lib.hs (111 lines changed)
@ -27,10 +27,10 @@ import qualified Data.Aeson.Encode.Pretty as AP
|
||||
import Data.Aeson.Lens
|
||||
import qualified Data.ByteString.Char8 as C8
|
||||
import Data.ByteString.Lazy.Char8 as C8L
|
||||
import Data.Char
|
||||
import Data.Coerce
|
||||
import Data.Monoid
|
||||
import qualified Data.Text as T
|
||||
import Data.Text (Text)
|
||||
import qualified Data.Text as Text
|
||||
import Data.Text.Encoding (encodeUtf8)
|
||||
import qualified Network.Wreq as Wreq
|
||||
import Nix.Expr (NExpr)
|
||||
@ -44,17 +44,16 @@ import Text.PrettyPrint.ANSI.Leijen as Text.PrettyPrint (SimpleDoc,
|
||||
renderPretty)
|
||||
import URI.ByteString
|
||||
|
||||
import Data.Docker.Image.V1.Types
|
||||
|
||||
import Data.Docker.Image.Types
|
||||
import Types
|
||||
import Types.Exceptions
|
||||
import Types.ImageName
|
||||
import Types.ImageTag
|
||||
|
||||
-- | Throw a @userError@, exiting the program with the supplied
|
||||
-- | Throw a 'userError', exiting the program with the supplied
|
||||
-- message.
|
||||
die :: MonadIO io => T.Text -> io a
|
||||
die = liftIO . throwIO . userError . T.unpack
|
||||
die :: MonadIO io => Text -> io a
|
||||
die = liftIO . throwIO . userError . Text.unpack
|
||||
|
||||
-- | Print an error message to stderr and return a non-zero exit code,
|
||||
-- the message is prefixed with the name of the program.
|
||||
@ -63,57 +62,67 @@ exitProgFail msg = do
|
||||
name <- getProgName
|
||||
Exit.die $ name ++ ": " ++ msg
|
||||
|
||||
-- | Writes a bytestring to the provided filesystem path if it
|
||||
-- @isJust@ and prints the path it wrote to the screen, otherwise
|
||||
-- print the entire contents to the screen.
|
||||
-- | Print the bytestring to stdout if the first argument is
|
||||
-- @Nothing@, otherwise write the bytestring to the provided
|
||||
-- filesystem path and print the path to stdout.
|
||||
writeOrPrint :: Maybe FilePath -> C8L.ByteString -> IO ()
|
||||
writeOrPrint o r = case o of
|
||||
Just p' -> C8L.writeFile p' r >> Prelude.putStrLn p'
|
||||
Nothing -> C8L.putStrLn r
|
||||
writeOrPrint filepath content = maybe (C8L.putStrLn content) writeContent filepath
|
||||
where
|
||||
writeContent p = C8L.writeFile p content >> Prelude.putStrLn p
|
||||
|
||||
-- | Make a path given a base path and a docker container name.
|
||||
mkOutImage :: ImageName -> FilePath -> FilePath
|
||||
-- | Combine an image name and a base path producing an output path.
|
||||
mkOutImage :: ImageName -- ^ Docker image name
|
||||
-> FilePath -- ^ Base path to write to
|
||||
-> FilePath
|
||||
mkOutImage n o = o </> (takeBaseName $ coerce n)
|
||||
|
||||
-- | Make a path given a base path, a docker container name, and a
|
||||
-- docker container tag appending "-config.json" to the basename.
|
||||
mkOutConfig :: ImageName -> ImageTag -> FilePath -> FilePath
|
||||
-- | Combine an image name, an image tag, and a base path producing an
|
||||
-- output path with a @-config.json@ suffix.
|
||||
mkOutConfig :: ImageName -- ^ Docker image name
|
||||
-> ImageTag -- ^ Docker image tag
|
||||
-> FilePath -- ^ Base path to write to
|
||||
-> FilePath
|
||||
mkOutConfig n t o = o </> Prelude.concat
|
||||
[ (takeBaseName $ coerce n)
|
||||
, "_", coerce t
|
||||
, "-config.json"
|
||||
]
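For example (a hypothetical sketch, assuming ImageName and ImageTag are plain string newtypes, which is how they are used here):

--   mkOutConfig (ImageName "library/debian") (ImageTag "jessie") "/tmp"
--     == "/tmp/debian_jessie-config.json"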
|
||||
|
||||
-- | Make a path given a base path, a docker container name, and a
|
||||
-- docker container tag appending "-manifest.json" to the basename.
|
||||
mkOutManifest :: ImageName -> ImageTag -> FilePath -> FilePath
|
||||
-- | Combine an image name, an image tag, and a base path producing an
|
||||
-- output path with a @-manifest.json@ suffix.
|
||||
mkOutManifest :: ImageName -- ^ Docker image name
|
||||
-> ImageTag -- ^ Docker image tag
|
||||
-> FilePath -- ^ Base path to write to
|
||||
-> FilePath
|
||||
mkOutManifest n t o = o </> Prelude.concat
|
||||
[ (takeBaseName $ coerce n)
|
||||
, "_", coerce t
|
||||
, "-manifest.json"
|
||||
]
|
||||
|
||||
-- | Safely join a list of strings and a Network.URI record together
|
||||
-- using @joinPath@.
|
||||
joinURIPath :: [String] -> RegistryURI -> RegistryURI
|
||||
-- | Join a list of strings and the path part of a 'RegistryURI' to
|
||||
-- produce a new 'RegistryURI' with a path root of @/v2@.
|
||||
joinURIPath :: [String] -- ^ Extra path segments to add
|
||||
-> RegistryURI -- ^ Base URI to add path segments to
|
||||
-> RegistryURI
|
||||
joinURIPath pts uri@URI{..} = uri { uriPath = joinedParts }
|
||||
where
|
||||
joinedParts = C8.pack $ File.joinPath ("/":"v2":(C8.unpack uriPath):pts)
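A hypothetical example, assuming a registry URI with an empty path component:

--   joinURIPath ["library", "debian", "manifests", "latest"] registryURI
--   -- yields a URI whose path is "/v2/library/debian/manifests/latest"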
|
||||
|
||||
-- | Produce an @Wreq.Options@ using @Network.Wreq.defaults@ and an @Auth@.
|
||||
-- | Given a 'Wreq.Auth' produce a 'Wreq.Options'.
|
||||
opts :: Maybe Wreq.Auth -> Wreq.Options
|
||||
opts bAuth = Wreq.defaults & Wreq.auth .~ bAuth
|
||||
|
||||
-- | Hash a @Data.ByteString.Lazy.Char8@ using the SHA256 algorithm.
|
||||
-- | Hash a 'Data.ByteString.Lazy.Char8' using the 'Hash.SHA256'
|
||||
-- algorithm.
|
||||
sha256 :: C8L.ByteString -> Hash.Digest Hash.SHA256
|
||||
sha256 = Hash.hashlazy
|
||||
|
||||
-- | Strip the hash algorithm identifier prefix from the beginning of
|
||||
-- a hash digest string; e.g: "sha256:<digest>" becomes "<digest>".
|
||||
stripHashId :: T.Text -> T.Text
|
||||
stripHashId = snd . T.breakOnEnd ":"
|
||||
-- | Strip the @sha256:@ identifier prefix from a hash digest.
|
||||
stripHashId :: Text -> Text
|
||||
stripHashId = snd . Text.breakOnEnd ":"
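For example (hypothetical values):

--   stripHashId "sha256:abcdef0123" == "abcdef0123"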
|
||||
|
||||
-- | Encode, following Docker's canonical JSON rules, any @ToJSON@
|
||||
-- | Encode, following Docker's canonical JSON rules, any 'ToJSON'
|
||||
-- data type.
|
||||
--
|
||||
-- The canonicalization rules enable consistent hashing of encoded
|
||||
@ -127,24 +136,22 @@ stripHashId = snd . T.breakOnEnd ":"
|
||||
encodeCanonical :: Data.Aeson.ToJSON a => a -> C8L.ByteString
|
||||
encodeCanonical = AP.encodePretty' conf
|
||||
where
|
||||
-- NB: the spec requires keys to be in lexically sorted order and
|
||||
-- it appears that the Ord instance of @Text@ behaves the same way
|
||||
-- the Ord instance for @String@ does: it sorts lexically.
|
||||
conf = AP.defConfig { AP.confIndent = AP.Spaces 0, AP.confCompare = compare }
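A hedged illustration of the key-ordering guarantee (the exact whitespace depends on aeson-pretty's rendering, so only the ordering is shown):

--   encodeCanonical (object [ "b" .= True, "a" .= (1 :: Int) ])
--   -- emits "a" before "b", regardless of the order the keys were supplied in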
|
||||
|
||||
-- | Throw an error if `Maybe FilePath` is `Nothing`, otherwise return
|
||||
-- the @FilePath@ unwrapped.
|
||||
requireOutPath :: (Except.MonadError HockerException m)
|
||||
-- | Throw an error if the first argument is @Nothing@, otherwise
|
||||
-- return the @FilePath@ unwrapped.
|
||||
requirePath :: (Except.MonadError HockerException m)
|
||||
=> Maybe FilePath
|
||||
-> m (FilePath)
|
||||
requireOutPath = maybe outPathError return
|
||||
requirePath = maybe pathError pure
|
||||
where
|
||||
outPathError = Except.throwError $
|
||||
hockerException "To fetch and assemble a docker image, `--out=<path>` must be supplied"
|
||||
pathError =
|
||||
Except.throwError
|
||||
(hockerException "To fetch and assemble a docker image, '--out=<path>' must be supplied")
|
||||
|
||||
-- | Pluck the digest value for the config JSON given a docker
|
||||
-- | Pluck out the digest value for the config JSON given a docker
|
||||
-- registry image manifest. Attempting to parse and return the digest
|
||||
-- value as a `Digest SHA256`, otherwise throwing an error.
|
||||
-- value as a 'Hash.SHA256', otherwise throw an error.
|
||||
getConfigDigest :: (Except.MonadError HockerException m)
|
||||
=> C8L.ByteString
|
||||
-> m (Hash.Digest Hash.SHA256)
|
||||
@ -154,29 +161,23 @@ getConfigDigest (view (key "config" . key "digest" . _String) -> digest) =
|
||||
parsedDigest = toDigest $ encodeUtf8 digest
|
||||
badDigest = Except.throwError $ hockerException "Failed parsing the config hash digest"
|
||||
|
||||
-- | @upperFirst@ uppercases the first letter of the string.
|
||||
upperFirst :: String -> String
|
||||
upperFirst [] = []
|
||||
upperFirst (h:t) = toUpper h : t
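For example, upperFirst "config" == "Config", which is how the fieldLabelModifier = upperFirst splices elsewhere in this commit map record fields onto capitalized JSON keys.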
|
||||
|
||||
|
||||
-- | Split a docker image's name on the forward slash separator so we
|
||||
-- get the distinct repo name and image name.
|
||||
splitImgName :: ImageName -> (RepoNamePart, ImageNamePart)
|
||||
splitImgName (ImageName (T.pack -> n)) = over _2 T.tail $ T.break (=='/') n
|
||||
splitRepository :: ImageName -> (RepoNamePart, ImageNamePart)
|
||||
splitRepository (ImageName (Text.pack -> n)) = over _2 Text.tail $ Text.break (=='/') n
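For example (a hypothetical sketch, assuming OverloadedStrings and that the name parts are Text synonyms):

--   splitRepository (ImageName "library/debian") == ("library", "debian")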
|
||||
|
||||
-- | Pretty print a Nix expression and return a
|
||||
-- @Text.PrettyPrint.SimpleDoc@, this can in turn be displayed to the
|
||||
-- screen using @Text.PrettyPrint.displayIO@ or transformed into a
|
||||
-- string using @Text.PrettyPrint.displayS@.
|
||||
-- | Given a nix expression AST, produce a pretty printer document.
|
||||
renderNixExpr :: NExpr -> Text.PrettyPrint.SimpleDoc
|
||||
renderNixExpr = renderPretty 0.4 120 . prettyNix
|
||||
|
||||
-- | Pretty print a Nix expression AST and print to stdout.
|
||||
-- | Print a nix expression AST using the 'renderNixExpr' pretty
|
||||
-- printing renderer.
|
||||
pprintNixExpr :: NExpr -> IO ()
|
||||
pprintNixExpr expr = Prelude.putStrLn (displayS (renderNixExpr expr) "")
|
||||
|
||||
-- | Given an executable's name, try to find it in the current
|
||||
-- process's PATH context.
|
||||
-- | Given an executable's name, try to find it in the PATH.
|
||||
findExec :: (MonadIO m, Except.MonadError HockerException m)
|
||||
=> String
|
||||
-> m Prelude.FilePath
|
||||
|
137
src/Network/Wreq/Docker/Image.hs
Normal file
137
src/Network/Wreq/Docker/Image.hs
Normal file
@ -0,0 +1,137 @@
|
||||
{-# LANGUAGE FlexibleContexts #-}
|
||||
{-# LANGUAGE LambdaCase #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
{-# LANGUAGE QuasiQuotes #-}
|
||||
{-# LANGUAGE RecordWildCards #-}
|
||||
{-# LANGUAGE ScopedTypeVariables #-}
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
-- |
|
||||
-- Module : Network.Wreq.Docker.Image
|
||||
-- Copyright : (C) 2016 Awake Networks
|
||||
-- License : Apache-2.0
|
||||
-- Maintainer : Awake Networks <opensource@awakenetworks.com>
|
||||
-- Stability : stable
|
||||
----------------------------------------------------------------------------
|
||||
|
||||
module Network.Wreq.Docker.Image where
|
||||
|
||||
|
||||
import Control.Lens
|
||||
import Control.Monad
|
||||
import Control.Monad.Except
|
||||
import Control.Monad.Reader
|
||||
import Data.ByteString.Lazy.Char8 as C8L
|
||||
import Data.Coerce
|
||||
import Data.Either
|
||||
import Data.HashSet as Set
|
||||
import Data.Monoid
|
||||
import Data.Text (Text)
|
||||
import qualified Data.Text as Text
|
||||
import NeatInterpolation
|
||||
import qualified Network.Wreq as Wreq
|
||||
import System.FilePath.Posix as File
|
||||
import System.Terminal.Concurrent
|
||||
|
||||
import Data.Docker.Image.Types
|
||||
import Lib
|
||||
|
||||
import Network.Wreq.Docker.Image.Lib as Docker.Image
|
||||
import Network.Wreq.Docker.Registry as Docker.Registry
|
||||
import Types
|
||||
import Types.Exceptions
|
||||
import Types.ImageName
|
||||
|
||||
-- | Fetch an image from the docker registry, assembling the
|
||||
-- artifacts into a Docker V1.2 Image.
|
||||
fetchImage :: HockerMeta -> IO (Either HockerException Text)
|
||||
fetchImage =
|
||||
runHocker $ ask >>= \HockerMeta{..} -> do
|
||||
imageOutDir <- Lib.requirePath outDir
|
||||
manifest <- fetchManifest >>= checkResponseIntegrity'
|
||||
configDigest <- getConfigDigest $ manifest ^. Wreq.responseBody
|
||||
|
||||
-- TODO: use Managed
|
||||
|
||||
-- Fetch and write the configuration json file for the image
|
||||
let configFileHash = Lib.stripHashId . Text.pack $ showSHA configDigest
|
||||
imageConfig <- fetchImageConfig configDigest
|
||||
imageConfigFile <- writeRespBody
|
||||
(File.joinPath [imageOutDir, Text.unpack configFileHash] `addExtension` "json")
|
||||
configFileHash
|
||||
imageConfig
|
||||
|
||||
let refLayers = pluckRefLayersFrom $ imageConfig ^. Wreq.responseBody
|
||||
refLayers' = fmap Lib.stripHashId refLayers
|
||||
refLayerSet = Set.fromList refLayers'
|
||||
manifestLayers = pluckLayersFrom $ manifest ^. Wreq.responseBody
|
||||
(_, strippedReg) = Text.breakOnEnd "//" . Text.pack . show $ dockerRegistry
|
||||
repoTags = (Text.unpack strippedReg) </> (coerce imageName)
|
||||
|
||||
-- Concurrently fetch layers and write to disk with a limit of three
|
||||
-- threads
|
||||
layers <- mapPool 3 Docker.Image.fetchLayer $ Prelude.zip refLayers' manifestLayers
|
||||
|
||||
let writtenLayerSet = Set.fromList . fmap (Text.pack . takeBaseName) $ rights layers
|
||||
refLayerSetTxt = Text.pack (show refLayerSet)
|
||||
wrtLayerSetTxt = Text.pack (show writtenLayerSet)
|
||||
dffLayerSetTxt = Text.pack (show $ Set.difference refLayerSet writtenLayerSet)
|
||||
|
||||
when (writtenLayerSet /= refLayerSet) $
|
||||
throwError . hockerException $ Text.unpack
|
||||
([text|
|
||||
Written layers do not match the reference layers!
|
||||
|
||||
Reference layers: ${refLayerSetTxt}
|
||||
Written layers: ${wrtLayerSetTxt}
|
||||
|
||||
Difference: ${dffLayerSetTxt}
|
||||
|])
|
||||
|
||||
createImageRepository repoTags refLayers'
|
||||
createImageManifest repoTags imageConfigFile refLayers'
|
||||
|
||||
archivePath <- createImageTar
|
||||
|
||||
return (Text.pack archivePath)
|
||||
|
||||
-- | Fetch a layer using its digest key from the docker registry.
|
||||
fetchLayer :: HockerMeta -> IO (Either HockerException FilePath)
|
||||
fetchLayer =
|
||||
runHocker $ ask >>= \HockerMeta{..} -> do
|
||||
layerOut <- Lib.requirePath out
|
||||
layerDigest <- Text.pack . show <$> maybe
|
||||
(throwError $ hockerException
|
||||
"a layer digest is expected!")
|
||||
return
|
||||
imageLayer
|
||||
|
||||
let shortRef = Text.take 7 layerDigest
|
||||
|
||||
writeC <- liftIO $ getConcurrentOutputter
|
||||
liftIO . writeC . Text.unpack $ "Downloading layer: " <> shortRef
|
||||
|
||||
fetchedImageLayer <- checkResponseIntegrity' =<< Docker.Registry.fetchLayer ("sha256:" <> layerDigest)
|
||||
layerPath <- writeRespBody layerOut layerDigest fetchedImageLayer
|
||||
|
||||
liftIO . writeC $ Text.unpack ("=> wrote " <> shortRef)
|
||||
|
||||
return layerPath
|
||||
|
||||
-- | Fetch the configuration JSON file of the specified image from the
|
||||
-- docker registry.
|
||||
fetchConfig :: HockerMeta -> IO (Either HockerException C8L.ByteString)
|
||||
fetchConfig =
|
||||
runHocker $ ask >>= \HockerMeta{..} -> do
|
||||
configDigest <-
|
||||
fetchManifest
|
||||
>>= checkResponseIntegrity'
|
||||
>>= getConfigDigest . view Wreq.responseBody
|
||||
|
||||
fetchImageConfig configDigest
|
||||
>>= return . view Wreq.responseBody
|
||||
|
||||
-- | Fetch the docker registry manifest JSON file for the specified
|
||||
-- image from the docker registry.
|
||||
fetchImageManifest :: HockerMeta -> IO (Either HockerException C8L.ByteString)
|
||||
fetchImageManifest = runHocker (fetchManifest >>= return . view Wreq.responseBody)
|
@ -31,16 +31,16 @@ import qualified System.Directory as Directory
|
||||
import System.FilePath.Posix as File
|
||||
import System.Terminal.Concurrent
|
||||
|
||||
import Data.Docker.Image.V1_2.Types
|
||||
import Data.Docker.Image.Types
|
||||
import Lib
|
||||
import Network.Wreq.Docker.Registry.V2 as Docker.Registry
|
||||
import Network.Wreq.Docker.Registry as Docker.Registry
|
||||
import Types
|
||||
import Types.Exceptions
|
||||
import Types.ImageTag
|
||||
|
||||
-- | Like @mapM@ but concurrently applies a function to the elements
|
||||
-- of the @Traversable@, limiting the maximum number of worker threads
|
||||
-- by *n*.
|
||||
-- | Like @mapM@ but concurrently apply a function to the elements of
|
||||
-- the @Traversable@, limiting the maximum number of worker threads by
|
||||
-- _n_.
|
||||
mapPool :: Traversable t
|
||||
=> Int -- ^ Number of pooled worker threads
|
||||
-> ((String -> IO ()) -> a -> Hocker FilePath) -- ^ Processing function
|
||||
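The thread-limiting pattern is worth seeing in isolation; a generic sketch of an n-bounded concurrent mapM using the async package (assumed names; the module's own mapPool additionally threads a concurrent-output writer through and runs in Hocker):

module MapPoolSketch where

import Control.Concurrent.Async (mapConcurrently)
import Control.Concurrent.QSem  (newQSem, signalQSem, waitQSem)
import Control.Exception        (bracket_)

-- Apply f to every element concurrently, but let at most n actions
-- run at any one time by guarding each with a quantity semaphore.
mapPoolSketch :: Traversable t => Int -> (a -> IO b) -> t a -> IO (t b)
mapPoolSketch n f xs = do
  sem <- newQSem n
  mapConcurrently (bracket_ (waitQSem sem) (signalQSem sem) . f) xs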
@ -69,7 +69,7 @@ forPool n = flip $ mapPool n
|
||||
-- | Download, verify, decompress, and write a docker container image
|
||||
-- layer to the filesystem.
|
||||
fetchLayer :: (String -> IO ()) -- ^ Concurrent terminal output function
|
||||
-> (RefLayer, Layer) -- ^ A tuple of the reference layer hash digest from the image's config JSON and the hash digest from the image's manifest JSON
|
||||
-> (RefLayer, Layer) -- ^ A tuple of the reference layer hash digest from the image's config JSON and hash digest from the image's manifest JSON
|
||||
-> Hocker FilePath
|
||||
fetchLayer writeC layer@(refl, (stripHashId -> layer')) = ask >>= \HockerMeta{..} -> do
|
||||
liftIO . writeC . Text.unpack $ "Downloading layer: " <> (Text.take 7 layer')
|
||||
@ -79,7 +79,7 @@ fetchLayer writeC layer@(refl, (stripHashId -> layer')) = ask >>= \HockerMeta{..
|
||||
let decompressed = fetchedImageLayer & Wreq.responseBody %~ GZip.decompress
|
||||
shortRef = Text.take 7 refl
|
||||
|
||||
imageOutDir <- Lib.requireOutPath outDir
|
||||
imageOutDir <- Lib.requirePath outDir
|
||||
|
||||
liftIO $ writeC " => decompressed "
|
||||
|
||||
@ -101,14 +101,14 @@ createImageManifest repoTag imageConfigFile refls = ask >>= \HockerMeta{..} -> d
|
||||
(takeBaseName imageConfigFile `addExtension` "json")
|
||||
[Text.pack (repoTag ++ ":" ++ coerce imageTag)]
|
||||
(fmap ((`addExtension` "tar") . Text.unpack) refls) ]
|
||||
imageOutDir <- Lib.requireOutPath outDir
|
||||
imageOutDir <- Lib.requirePath outDir
|
||||
liftIO $ C8L.writeFile
|
||||
(imageOutDir </> "manifest" `addExtension` "json")
|
||||
(Lib.encodeCanonical imageManifest)
|
||||
|
||||
-- | Generate a @repositories@ json file.
|
||||
--
|
||||
-- NB: it is JSON but Docker doesn't want it with a JSON extension
|
||||
-- NB: it is JSON but Docker doesn't want it with a @.json@ extension
|
||||
-- unlike its sibling the @manifest.json@ file.
|
||||
createImageRepository :: RepoTag -- ^ e.g: registry.mydomain.net:5001/reponame/imagename
|
||||
-> [RefLayer] -- ^ Layer hash digests sourced from the image's configuration JSON
|
||||
@ -125,7 +125,7 @@ createImageRepository repoTag refls = ask >>= \HockerMeta{..} -> do
|
||||
(HashMap.singleton
|
||||
(Text.pack $ coerce imageTag)
|
||||
((Prelude.last refls) <> ".tar"))
|
||||
imageOutDir <- Lib.requireOutPath outDir
|
||||
imageOutDir <- Lib.requirePath outDir
|
||||
liftIO $ C8L.writeFile
|
||||
(imageOutDir </> "repositories")
|
||||
(Lib.encodeCanonical repositories)
|
||||
@ -134,8 +134,8 @@ createImageRepository repoTag refls = ask >>= \HockerMeta{..} -> do
|
||||
-- and remove the output dir.
|
||||
createImageTar :: Hocker FilePath
|
||||
createImageTar = ask >>= \HockerMeta{..} -> do
|
||||
imageOutDir <- Lib.requireOutPath outDir
|
||||
archivePath <- Lib.requireOutPath out
|
||||
imageOutDir <- Lib.requirePath outDir
|
||||
archivePath <- Lib.requirePath out
|
||||
|
||||
entries <- liftIO $ Directory.getDirectoryContents imageOutDir
|
||||
|
||||
|
@ -1,158 +0,0 @@
|
||||
{-# LANGUAGE FlexibleContexts #-}
|
||||
{-# LANGUAGE LambdaCase #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
{-# LANGUAGE QuasiQuotes #-}
|
||||
{-# LANGUAGE RecordWildCards #-}
|
||||
{-# LANGUAGE ScopedTypeVariables #-}
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
-- |
|
||||
-- Module : Network.Wreq.Docker.Image.V1_2
|
||||
-- Copyright : (C) 2016 Awake Networks
|
||||
-- License : Apache-2.0
|
||||
-- Maintainer : Awake Networks <opensource@awakenetworks.com>
|
||||
-- Stability : stable
|
||||
----------------------------------------------------------------------------
|
||||
|
||||
module Network.Wreq.Docker.Image.V1_2 where
|
||||
|
||||
|
||||
import Control.Lens
|
||||
import Control.Monad
|
||||
import Control.Monad.Except
|
||||
import Control.Monad.Reader
|
||||
import Data.ByteString.Lazy.Char8 as C8L
|
||||
import Data.Coerce
|
||||
import Data.Either
|
||||
import Data.HashSet as Set
|
||||
import Data.Monoid
|
||||
import qualified Data.Text as T
|
||||
import NeatInterpolation
|
||||
import qualified Network.Wreq as Wreq
|
||||
import System.FilePath.Posix as File
|
||||
import System.Terminal.Concurrent
|
||||
|
||||
import Data.Docker.Image.V1.Types
|
||||
import Lib
|
||||
|
||||
import Network.Wreq.Docker.Image.Lib as Docker.Image
|
||||
import Network.Wreq.Docker.Registry.V2 as Docker.Registry
|
||||
import Types
|
||||
import Types.Exceptions
|
||||
import Types.ImageName
|
||||
|
||||
-- | Fetches an image from the specified (or default) V2 Docker
|
||||
-- Registery and assembles the artifacts into a compatible Docker V1.2
|
||||
-- Image.
|
||||
fetchAndAssemble :: HockerMeta -> IO (Either HockerException T.Text)
|
||||
fetchAndAssemble = runHocker doFetchImage
|
||||
|
||||
-- | Fetches a layer by its digest key from the specified (or default)
|
||||
-- V2 Docker Registery.
|
||||
fetchLayer :: HockerMeta -> IO (Either HockerException FilePath)
|
||||
fetchLayer = runHocker doFetchLayer
|
||||
|
||||
-- | Fetches the config file of the specified image from the specified
|
||||
-- (or default) V2 Docker Registry and returns it.
|
||||
fetchConfig :: HockerMeta -> IO (Either HockerException C8L.ByteString)
|
||||
fetchConfig = runHocker doFetchConfig
|
||||
|
||||
-- | Fetches the manifest file of the specified image from the
|
||||
-- specified (or default) V2 Docker Registry and returns it.
|
||||
fetchImageManifest :: HockerMeta -> IO (Either HockerException C8L.ByteString)
|
||||
fetchImageManifest = runHocker doFetch
|
||||
where
|
||||
doFetch = fetchManifest >>= return . view Wreq.responseBody
|
||||
|
||||
-- | Executes the monadic logic for fetching the docker image config
|
||||
-- JSON within the ReaderT monad.
|
||||
doFetchConfig :: Hocker C8L.ByteString
|
||||
doFetchConfig = ask >>= \HockerMeta{..} -> do
|
||||
configDigest <-
|
||||
fetchManifest
|
||||
>>= checkResponseIntegrity'
|
||||
>>= getConfigDigest . view Wreq.responseBody
|
||||
|
||||
fetchImageConfig configDigest
|
||||
>>= return . view Wreq.responseBody
|
||||
|
||||
-- | Executes the monadic logic for fetching and saving a layer tar
|
||||
-- archive.
|
||||
doFetchLayer :: Hocker FilePath
|
||||
doFetchLayer = ask >>= \HockerMeta{..} -> do
|
||||
layerOut <- Lib.requireOutPath out
|
||||
|
||||
layerDigest <- T.pack . show <$> maybe
|
||||
(throwError $ hockerException
|
||||
"a layer digest is expected!")
|
||||
return
|
||||
imageLayer
|
||||
|
||||
let shortRef = T.take 7 layerDigest
|
||||
|
||||
writeC <- liftIO $ getConcurrentOutputter
|
||||
liftIO . writeC . T.unpack $ "Downloading layer: " <> shortRef
|
||||
|
||||
fetchedImageLayer <- checkResponseIntegrity' =<< Docker.Registry.fetchLayer ("sha256:" <> layerDigest)
|
||||
layerPath <- writeRespBody layerOut layerDigest fetchedImageLayer
|
||||
|
||||
liftIO . writeC $ T.unpack ("=> wrote " <> shortRef)
|
||||
|
||||
return layerPath
|
||||
|
||||
-- | Executes the monadic logic for fetching, transforming, and
|
||||
-- assembling a docker container image.
|
||||
doFetchImage :: Hocker T.Text
|
||||
doFetchImage = ask >>= \HockerMeta{..} -> do
|
||||
imageOutDir <- Lib.requireOutPath outDir
|
||||
|
||||
manifest <- fetchManifest >>= checkResponseIntegrity'
|
||||
configDigest <- getConfigDigest $ manifest ^. Wreq.responseBody
|
||||
|
||||
-- TODO: ALL of the below steps that handle saving things to the
|
||||
-- disk should probably be wrapped in a bracket function responsible
|
||||
-- for cleaning up any partially written data if there's a
|
||||
-- failure... Or perhaps instad of bracketing in here, we bracket
|
||||
-- around the @runExceptT@?
|
||||
|
||||
-- Fetch and write the configuration json file for the image
|
||||
let configFileHash = Lib.stripHashId . T.pack $ showSHA configDigest
|
||||
imageConfig <- fetchImageConfig configDigest
|
||||
imageConfigFile <- writeRespBody
|
||||
(File.joinPath [imageOutDir, T.unpack configFileHash] `addExtension` "json")
|
||||
configFileHash
|
||||
imageConfig
|
||||
|
||||
let refLayers = pluckRefLayersFrom $ imageConfig ^. Wreq.responseBody
|
||||
refLayers' = fmap Lib.stripHashId refLayers
|
||||
refLayerSet = Set.fromList refLayers'
|
||||
manifestLayers = pluckLayersFrom $ manifest ^. Wreq.responseBody
|
||||
(_, strippedReg) = T.breakOnEnd "//" . T.pack . show $ dockerRegistry
|
||||
repoTags = (T.unpack strippedReg) </> (coerce imageName)
|
||||
|
||||
-- Concurrently fetch layers and write to disk with a limit of three
|
||||
-- threads
|
||||
layers <- mapPool 3 Docker.Image.fetchLayer $ Prelude.zip refLayers' manifestLayers
|
||||
|
||||
let writtenLayerSet = Set.fromList . fmap (T.pack . takeBaseName) $ rights layers
|
||||
refLayerSetTxt = T.pack (show refLayerSet)
|
||||
wrtLayerSetTxt = T.pack (show writtenLayerSet)
|
||||
dffLayerSetTxt = T.pack (show $ Set.difference refLayerSet writtenLayerSet)
|
||||
|
||||
when (writtenLayerSet /= refLayerSet) $
|
||||
throwError . hockerException $ T.unpack
|
||||
([text|
|
||||
Written layers do not match the reference layers!
|
||||
|
||||
Reference layers: ${refLayerSetTxt}
|
||||
Written layers: ${wrtLayerSetTxt}
|
||||
|
||||
Difference: ${dffLayerSetTxt}
|
||||
|])
|
||||
|
||||
createImageRepository repoTags refLayers'
|
||||
createImageManifest repoTags imageConfigFile refLayers'
|
||||
|
||||
archivePath <- createImageTar
|
||||
|
||||
return $ T.pack archivePath
|
@ -9,7 +9,7 @@
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
-- |
|
||||
-- Module : Network.Wreq.Docker.Registry.V2
|
||||
-- Module : Network.Wreq.Docker.Registry
|
||||
-- Copyright : (C) 2016 Awake Networks
|
||||
-- License : Apache-2.0
|
||||
-- Maintainer : Awake Networks <opensource@awakenetworks.com>
|
||||
@ -22,7 +22,7 @@
|
||||
-- software is "docker distribution".
|
||||
----------------------------------------------------------------------------
|
||||
|
||||
module Network.Wreq.Docker.Registry.V2 where
|
||||
module Network.Wreq.Docker.Registry where
|
||||
|
||||
import Control.Lens
|
||||
import qualified Control.Monad.Except as Except
|
||||
@ -39,14 +39,14 @@ import qualified Data.Text as Text
|
||||
import qualified Network.Wreq as Wreq
|
||||
import System.Directory
|
||||
|
||||
import Data.Docker.Image.V1.Types
|
||||
import Data.Docker.Image.Types
|
||||
import Lib
|
||||
import Types
|
||||
import Types.Exceptions
|
||||
import Types.ImageName
|
||||
import Types.ImageTag
|
||||
|
||||
-- | Default docker hub registry.
|
||||
-- | Default docker hub registry (@https://registry-1.docker.io/v2/@).
|
||||
defaultRegistry :: URIRef Absolute
|
||||
defaultRegistry = URI
|
||||
{ uriScheme = Scheme "https"
|
||||
@ -60,9 +60,22 @@ defaultRegistry = URI
|
||||
, uriFragment = Nothing
|
||||
}
|
||||
|
||||
mkAuth :: RegistryURI
|
||||
-> ImageName
|
||||
-> Maybe Credentials
|
||||
-- | Given 'Credentials', produce a 'Wreq.Auth'.
|
||||
--
|
||||
-- If 'Credentials' is either 'BearerToken' or 'Basic' then produce a
|
||||
-- 'Wreq.Auth' value for that type of credential.
|
||||
--
|
||||
-- If @Nothing@ is provided _and_ the provided 'RegistryURI' matches
|
||||
-- the default registry, make a request to
|
||||
-- @https://auth.docker.io/token@ for a temporary pull-only bearer
|
||||
-- token, assuming the request we want to make is to the public docker
|
||||
-- hub and without any other credentials.
|
||||
--
|
||||
-- Otherwise, return 'Nothing' so that an unauthenticated request can
|
||||
-- be made.
|
||||
mkAuth :: RegistryURI -- ^ Docker registry
|
||||
-> ImageName -- ^ Docker image name
|
||||
-> Maybe Credentials -- ^ Docker registry authentication credentials
|
||||
-> IO (Maybe Wreq.Auth)
|
||||
mkAuth reg (ImageName img) credentials =
|
||||
case credentials of
|
||||
@ -78,21 +91,22 @@ mkAuth reg (ImageName img) credentials =
|
||||
getHubToken = Wreq.get ("https://auth.docker.io/token?service=registry.docker.io&scope=repository:"<>img<>":pull")
|
||||
mkHubBearer rsp = (Wreq.oauth2Bearer . encodeUtf8) <$> (rsp ^? Wreq.responseBody . key "token" . _String)
|
||||
|
||||
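The anonymous branch composes a token request with a bearer wrapper; a self-contained illustrative sketch of that flow, hard-coding library/debian as the example repository (names assumed, not part of this change):

{-# LANGUAGE OverloadedStrings #-}
module HubAuthSketch where

import           Control.Lens       ((^?))
import           Data.Aeson.Lens    (key, _String)
import           Data.Text.Encoding (encodeUtf8)
import qualified Network.Wreq       as Wreq

-- Request a short-lived, pull-only token from the public auth
-- endpoint and wrap it as an OAuth2 bearer credential for wreq.
anonymousPullAuth :: IO (Maybe Wreq.Auth)
anonymousPullAuth = do
  rsp <- Wreq.get "https://auth.docker.io/token?service=registry.docker.io&scope=repository:library/debian:pull"
  pure (Wreq.oauth2Bearer . encodeUtf8
          <$> (rsp ^? Wreq.responseBody . key "token" . _String))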
-- | Retrieve a list of layer hash digests from an image's manifest
|
||||
-- JSON.
|
||||
-- | Retrieve a list of layer hash digests from a docker registry
|
||||
-- image manifest JSON.
|
||||
--
|
||||
-- TODO: pluck out the layer's size and digest into a tuple.
|
||||
pluckLayersFrom :: Manifest -> [Layer]
|
||||
pluckLayersFrom = toListOf (key "layers" . values . key "digest" . _String)
|
||||
|
||||
-- | Retrieve a list of layer hash digests from an image's config
|
||||
-- JSON.
|
||||
-- | Retrieve a list of layer hash digests from an image's
|
||||
-- configuration JSON.
|
||||
--
|
||||
-- This is subtly different from @pluckLayersFrom@ because both list
|
||||
-- This is subtly different from 'pluckLayersFrom' because both list
|
||||
-- hash digests for the image's layers but the manifest's layer hash
|
||||
-- digests are keys into the registry's blob storage referencing the
|
||||
-- *compressed* layer archive. The config JSON's layer hash digests
|
||||
-- reference the uncompressed layer tar archives within the image.
|
||||
-- digests are keys into the registry's blob storage referencing
|
||||
-- _compressed_ layer archives. The configuration JSON's layer hash
|
||||
-- digests reference the uncompressed layer tar archives within the
|
||||
-- image.
|
||||
pluckRefLayersFrom :: ImageConfigJSON -> [Layer]
|
||||
pluckRefLayersFrom = toListOf (key "rootfs" . key "diff_ids" . values . _String)
|
||||
|
||||
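Both pluckers are single lens-aeson traversals; an illustrative standalone equivalent of the manifest side (assumed names, not part of this change):

{-# LANGUAGE OverloadedStrings #-}
module LayerSketch where

import           Control.Lens               (toListOf)
import           Data.Aeson.Lens            (key, values, _String)
import qualified Data.ByteString.Lazy.Char8 as C8L
import           Data.Text                  (Text)

-- Collect every layers[*].digest string from a registry manifest;
-- swapping the path for rootfs.diff_ids gives the config-JSON variant.
manifestLayerDigests :: C8L.ByteString -> [Text]
manifestLayerDigests = toListOf (key "layers" . values . key "digest" . _String)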
@ -110,8 +124,8 @@ fetchManifest = ask >>= \HockerMeta{..} ->
|
||||
, "application/vnd.docker.distribution.manifest.list.v2+json"
|
||||
]
|
||||
|
||||
-- | Retrieve the config json of an image by its hash digest (found in
|
||||
-- the V2 manifest for an image given by a name and tag).
|
||||
-- | Retrieve the configuration JSON of an image by its hash digest
|
||||
-- (found in the V2 manifest for an image given by a name and a tag).
|
||||
fetchImageConfig :: (Hash.Digest Hash.SHA256) -> Hocker RspBS
|
||||
fetchImageConfig (showSHA -> digest) = ask >>= \HockerMeta{..} ->
|
||||
liftIO $ Wreq.getWith (opts auth) (mkURL imageName dockerRegistry)
|
||||
@ -132,33 +146,32 @@ fetchLayer layer = ask >>= \HockerMeta{..} ->
|
||||
registry
|
||||
= C8.unpack (serializeURIRef' $ joinURIPath [name, "blobs", digest] registry)
|
||||
|
||||
-- | Write a @Wreq@ response body to the specified @FilePath@,
|
||||
-- checking the integrity of the file with its sha256 hash digest.
|
||||
-- | Write a 'Wreq.responseBody' to the specified 'FilePath', checking
|
||||
-- the integrity of the file with its sha256 hash digest.
|
||||
--
|
||||
-- The second argument, the @StrippedDigest@, must be a hash digest
|
||||
-- stripped of the "sha256:" hash algorithm identifier prefix.
|
||||
-- The second argument, the 'StrippedDigest', must be a hash digest
|
||||
-- stripped of the @sha256:@ algorithm identifier prefix.
|
||||
writeRespBody :: FilePath -- ^ Filesystem path to write the content to
|
||||
-> StrippedDigest -- ^ Hash digest, stripped of its hash algorithm identifier prefix
|
||||
-> RspBS -- ^ Wreq lazy bytestring response object
|
||||
-> StrippedDigest -- ^ Hash digest, stripped of its algorithm identifier prefix
|
||||
-> RspBS -- ^ Wreq lazy bytestring response object
|
||||
-> Hocker FilePath
|
||||
writeRespBody out digest resp = do
|
||||
liftIO . C8L.writeFile out $ resp ^. Wreq.responseBody
|
||||
|
||||
-- Now, verify the file; we assume the sha256 function since that is
|
||||
-- used everywhere
|
||||
verified <- liftIO $ checkFileIntegrity out digest
|
||||
verified <- liftIO (checkFileIntegrity out digest)
|
||||
either (Except.throwError . hockerException) return verified
|
||||
|
||||
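The integrity check itself amounts to hashing the written file and comparing against the expected, already-stripped digest; a small standalone sketch of that idea using cryptonite (assumed names; checkFileIntegrity's own error text appears in a later hunk below):

module IntegritySketch where

import qualified Crypto.Hash     as Hash
import qualified Data.ByteString as BS

-- Hash a file's contents with SHA256.
sha256File :: FilePath -> IO (Hash.Digest Hash.SHA256)
sha256File fp = Hash.hash <$> BS.readFile fp

-- Compare the hex rendering of that digest against the expected digest
-- string (stripped of its "sha256:" prefix).
matchesDigest :: FilePath -> String -> IO Bool
matchesDigest fp expected = (== expected) . show <$> sha256File fp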
-- | Write a response to the filesystem without a request hash
|
||||
-- digest. Attempt to fetch the value of the `ETag` header to verify
|
||||
-- digest. Attempt to fetch the value of the @ETag@ header to verify
|
||||
-- the integrity of the content received.
|
||||
--
|
||||
-- The Docker docs do *not* recommend this method for verification
|
||||
-- because the ETag and Docker-Content-Digest headers may change
|
||||
-- The Docker docs do _not_ recommend this method for verification
|
||||
-- because the @ETag@ and @Docker-Content-Digest@ headers may change
|
||||
-- between the time you issue a request with a digest and when you
|
||||
-- receive a response back!
|
||||
--
|
||||
-- We do it anyway and leave this warning.
|
||||
writeRespBody' :: FilePath -- ^ Filesystem path to write the content to
|
||||
-> RspBS -- ^ Wreq lazy bytestring response object
|
||||
-> RspBS -- ^ Wreq lazy bytestring response object
|
||||
-> Hocker FilePath
|
||||
writeRespBody' out r = writeRespBody out etagHash r
|
||||
where
|
||||
@ -224,8 +237,7 @@ checkFileIntegrity fp digest =
|
||||
fpTxt = Text.pack fp
|
||||
in fail $ Text.unpack
|
||||
([text|
|
||||
The sha256 hash for $fpTxt: $fhTxt
|
||||
Does not match the expected digest: $digest
|
||||
|])
|
||||
|
||||
The sha256 hash for $fpTxt: $fhTxt
|
||||
Does not match the expected digest: $digest
|
||||
|])
|
||||
return fp
|
@ -30,6 +30,7 @@ import qualified Control.Monad.Reader as Reader
|
||||
import Control.Monad.Reader.Class
|
||||
import qualified Crypto.Hash as Hash
|
||||
import qualified Data.ByteString.Lazy
|
||||
import Data.Char (toUpper)
|
||||
import Data.Monoid
|
||||
import Data.Text (Text)
|
||||
import qualified Data.Text as Text
|
||||
@ -186,3 +187,8 @@ instance ParseField Credentials where
|
||||
instance ParseFields Credentials
|
||||
instance ParseRecord Credentials where
|
||||
parseRecord = fmap Options.Generic.getOnly parseRecord
|
||||
|
||||
-- | @upperFirst@ uppercases the first letter of the string.
|
||||
upperFirst :: String -> String
|
||||
upperFirst [] = []
|
||||
upperFirst (h:t) = toUpper h : t
|
||||
|
@ -4,8 +4,7 @@ module Main where
|
||||
import Test.Tasty
|
||||
import Test.Tasty.HUnit
|
||||
|
||||
import qualified Tests.Data.Docker.Image.V1 as ImageV1Tests
|
||||
import qualified Tests.Data.Docker.Image.V1_2 as ImageV1_2Tests
|
||||
import qualified Tests.Data.Docker.Image as Docker.Image
|
||||
import qualified Tests.Data.Docker.Nix.FetchDocker as FetchDockerTests
|
||||
|
||||
main :: IO ()
|
||||
@ -13,7 +12,6 @@ main = defaultMain tests
|
||||
|
||||
tests :: TestTree
|
||||
tests = testGroup "Tests"
|
||||
[ ImageV1Tests.unitTests
|
||||
, ImageV1_2Tests.unitTests
|
||||
[ Docker.Image.unitTests
|
||||
, FetchDockerTests.tests
|
||||
]
|
||||
|
@ -4,19 +4,19 @@
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
-- |
|
||||
-- Module : Data.Docker.Image.V1_2
|
||||
-- Module : Data.Docker.Image
|
||||
-- Copyright : (C) 2016 Awake Networks
|
||||
-- License : Apache-2.0
|
||||
-- Maintainer : Awake Networks <opensource@awakenetworks.com>
|
||||
-- Stability : stable
|
||||
----------------------------------------------------------------------------
|
||||
|
||||
module Tests.Data.Docker.Image.V1_2 where
|
||||
module Tests.Data.Docker.Image where
|
||||
|
||||
import Data.Aeson
|
||||
import qualified Data.ByteString.Lazy.Char8 as C8L
|
||||
import Data.Docker.Image.V1_2.Types
|
||||
import Data.HashMap.Strict as H
|
||||
import qualified Data.ByteString.Lazy.Char8 as C8L
|
||||
import Data.Docker.Image.Types
|
||||
import Data.HashMap.Strict as H
|
||||
import Test.Tasty
|
||||
import Test.Tasty.HUnit
|
||||
|
@ -1,86 +0,0 @@
|
||||
{-# LANGUAGE LambdaCase #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
{-# LANGUAGE RecordWildCards #-}
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
-- |
|
||||
-- Module : Data.Docker.Image.V1
|
||||
-- Copyright : (C) 2016 Awake Networks
|
||||
-- License : Apache-2.0
|
||||
-- Maintainer : Awake Networks <opensource@awakenetworks.com>
|
||||
-- Stability : stable
|
||||
----------------------------------------------------------------------------
|
||||
|
||||
module Tests.Data.Docker.Image.V1 where
|
||||
|
||||
import qualified Crypto.Hash as Hash
|
||||
import qualified Data.ByteString.Char8 as C8
|
||||
import Data.Docker.Image.V1.Layer
|
||||
import Data.Docker.Image.V1.Types
|
||||
import Data.Maybe
|
||||
import Data.Sequence as Seq
|
||||
|
||||
import Test.Tasty
|
||||
import Test.Tasty.HUnit
|
||||
|
||||
import Lib
|
||||
|
||||
unitTests = testGroup "V1 Image Tests"
|
||||
[ testCase "Digest (De)Serialization" testDigest
|
||||
, testCase "Handle bad digest" testBadDigest1
|
||||
, testCase "Handle bad digest" testBadDigest2
|
||||
, testCase "Digest == ChainID" testChainID
|
||||
, testCase "Digest == DiffID" testDiffID
|
||||
, testCase "ChainID sequence generation" testChainIDGeneration
|
||||
]
|
||||
|
||||
mkHash = Lib.sha256 "somestring"
|
||||
|
||||
-- DiffID sequence from a real Docker Image.
|
||||
diffIds :: Seq DiffID
|
||||
diffIds = fromList $ fmap (DiffID . fromJust . toDigest)
|
||||
[ "sha256:f96222d75c5563900bc4dd852179b720a0885de8f7a0619ba0ac76e92542bbc8"
|
||||
, "sha256:149636c850120e59e6bb79f2fc23ed423030afc73841c221906a147d61da11a9"
|
||||
, "sha256:33c3a104206aed2ae947e03c48cc011af0a3e5b87e7ba8e7cbc298273a638921"
|
||||
, "sha256:2681a05b8f8288a384dbddf0b899ec9d2bea3ee265f1678230d0bdac6dc13da1"
|
||||
, "sha256:dcfda398b984bb5a55e1932079b6cc4823e433bd6c962f9667eaf28b0f1fe7e0"
|
||||
, "sha256:2a182bf72d68b9c7cb76be0f9dcadd047088ae6f8cb85e7ac9661f68537acccd"
|
||||
, "sha256:647af69f55fd5fdc27db7b6aa51945aec53b0b03d17095e79b4c69c6432195c7"
|
||||
, "sha256:c7ef4827bb9592e9788c1cc49e3db4e265c12f49c9b1f6c9bb99551eb5189020"
|
||||
, "sha256:f9361c1f9b1eb2d93709546fe6ad48786cea55c03c4e52d3f1cdb341e0d398da"
|
||||
]
|
||||
|
||||
-- Pre-computed golden result produced by a valid Python
|
||||
-- implementation of the ChainID sequence generation logic.
|
||||
preComputedChainIds :: Seq ChainID
|
||||
preComputedChainIds = fromList $ fmap (ChainID . fromJust . toDigest)
|
||||
[ "sha256:f96222d75c5563900bc4dd852179b720a0885de8f7a0619ba0ac76e92542bbc8"
|
||||
, "sha256:5e6f832cd2df18460af48ed117c5b63bc2189971c9346e6d952376b5a8ba74ff"
|
||||
, "sha256:19947c09eddb9dab0d1b938660cd72ea4bb8f0f24c604cf9e1d9b14772d7bd6d"
|
||||
, "sha256:b0fbea1a99ec834d59e524733f1be81f1dce325dbe9df58bba5dec7014b386c8"
|
||||
, "sha256:262faf2cc4db81d3bcb526099b7dc33069b24f4028a9a23d46edca2493077ce0"
|
||||
, "sha256:ac07dba5e07787c2a10edc3f8d8052f38cb5bec6767520bbab4289cb55b3a3f4"
|
||||
, "sha256:c781557b490e1e8ff2132af386abe2a9c2d3cb66df06ee2cbd489d869432328a"
|
||||
, "sha256:ff275e52e374819094e8035459820bf8e5fc42f287f603b445a8aee7aba2b689"
|
||||
, "sha256:ffd859ffb35598eeec1283f3ccb3633f2798c042343425f635d616633cf63c2b"
|
||||
]
|
||||
|
||||
testDigest =
|
||||
let digest = mkHash
|
||||
digestStr = showSHA digest
|
||||
in toDigest (C8.pack digestStr) @?= (Just digest)
|
||||
|
||||
testBadDigest1 = toDigest "ffd859ffb35598eeec1283f3ccb3633f2798c042343425f635d616633cf63c2b" @?= Nothing
|
||||
testBadDigest2 = toDigest "ffd859ffb35598eeec1283f3corrupt?" @?= Nothing
|
||||
|
||||
testChainID =
|
||||
let digest = mkHash
|
||||
in (show $ ChainID digest) @?= showSHA digest
|
||||
|
||||
testDiffID =
|
||||
let digest = mkHash
|
||||
in (show $ DiffID digest) @?= showSHA digest
|
||||
|
||||
testChainIDGeneration =
|
||||
let chainIDs = squishMaybe $ chainIDSequence diffIds
|
||||
in chainIDs @?= preComputedChainIds
|
@ -13,22 +13,22 @@
|
||||
|
||||
module Tests.Data.Docker.Nix.FetchDocker where
|
||||
|
||||
import Control.Exception as CE
|
||||
import Control.Monad.Except as Except
|
||||
import Data.ByteString.Lazy.Char8 as C8L
|
||||
import Data.Either (either)
|
||||
import qualified Data.Text as T
|
||||
import Control.Exception as CE
|
||||
import Control.Monad.Except as Except
|
||||
import Data.ByteString.Lazy.Char8 as C8L
|
||||
import Data.Either (either)
|
||||
import qualified Data.Text as Text
|
||||
import Network.URI
|
||||
import Test.Tasty
|
||||
import Test.Tasty.Golden
|
||||
import Test.Tasty.HUnit
|
||||
import Text.PrettyPrint.ANSI.Leijen as Text.PrettyPrint (displayS)
|
||||
import Text.PrettyPrint.ANSI.Leijen as Text.PrettyPrint (displayS)
|
||||
|
||||
import Data.Docker.Image.Types
|
||||
import Data.Docker.Nix.FetchDocker as Nix.FetchDocker
|
||||
import Data.Docker.Nix.Lib as Nix.Lib
|
||||
import Data.Docker.Nix.FetchDocker as Nix.FetchDocker
|
||||
import Data.Docker.Nix.Lib as Nix.Lib
|
||||
import Lib
|
||||
import Network.Wreq.Docker.Registry.V2 as Docker.Registry
|
||||
import Network.Wreq.Docker.Registry as Docker.Registry
|
||||
import Types
|
||||
import Types.ImageTag
|
||||
|
||||
@ -68,6 +68,6 @@ generateFetchDockerNix = do
|
||||
}
|
||||
|
||||
either
|
||||
(Lib.die . T.pack . show)
|
||||
(Lib.die . Text.pack . show)
|
||||
(return . C8L.pack . (flip displayS "") . Lib.renderNixExpr)
|
||||
nixExpression
|
||||
|