mirror of https://github.com/HuwCampbell/grenade.git (synced 2024-11-22 06:55:13 +03:00)

Fix silent conversion bug in pooling layer

Add QuickCheck properties against reference implementations for the layer internals. Add skolem layer generators.

This commit is contained in:
parent 70e9b83b04
commit 424d791a30
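
For context: the silent conversion being fixed is C's implicit double-to-int narrowing. The old pooling kernels stored each double sample in an int before comparing and writing it out, truncating the fractional part. A minimal Haskell sketch of the effect (hypothetical names; truncate stands in for the implicit C cast):

-- Hypothetical sketch of the bug: truncating before comparing and
-- writing loses the fractional part, so the pooled "maximum" is wrong.
buggyPoolMax :: [Double] -> Double
buggyPoolMax = fromIntegral . maximum . map (truncate :: Double -> Int)

fixedPoolMax :: [Double] -> Double
fixedPoolMax = maximum

-- buggyPoolMax [0.2, 0.9] == 0.0   (both samples truncate to 0)
-- fixedPoolMax [0.2, 0.9] == 0.9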
@@ -48,7 +48,7 @@ void col2im_cpu(const double* data_col, int dataOffset, const int channels,
   }
 }
 
-inline int max ( int a, int b ) { return a > b ? a : b; }
+inline double max ( double a, double b ) { return a > b ? a : b; }
 
 void pool_forwards_cpu(const double* data_im, int dataOffset, const int channels,
     const int height, const int width, const int kernel_h, const int kernel_w,
@@ -63,19 +63,24 @@ void pool_forwards_cpu(const double* data_im, int dataOffset, const int channels,
   for (int fitting_height = 0; fitting_height <= (height - kernel_h); fitting_height += stride_h) {
     for (int fitting_width = 0; fitting_width <= (width - kernel_w); fitting_width += stride_w) {
       // Start with the value in 0,0
-      int max_value = data_im[fitting_height * width + fitting_width + channel_size * channel];
+      int max_index = fitting_height * width + fitting_width + channel_size * channel;
+      double max_value = data_im[max_index];
       // Initial row, skipping the corner we've done
       for (int kernel_col = 1; kernel_col < kernel_w; kernel_col++) {
-        int input_row = fitting_height;
-        int input_col = fitting_width + kernel_col;
-        max_value = max ( max_value, data_im[input_row * width + input_col + channel_size * channel] );
+        int input_row = fitting_height;
+        int input_col = fitting_width + kernel_col;
+        int data_index = input_row * width + input_col + channel_size * channel;
+        double data_value = data_im[data_index];
+        max_value = max ( max_value, data_value );
       }
       // The remaining rows
       for (int kernel_row = 1; kernel_row < kernel_h; kernel_row++) {
         for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
-          int input_row = fitting_height + kernel_row;
-          int input_col = fitting_width + kernel_col;
-          max_value = max ( max_value, data_im[input_row * width + input_col + channel_size * channel] );
+          int input_row = fitting_height + kernel_row;
+          int input_col = fitting_width + kernel_col;
+          int data_index = input_row * width + input_col + channel_size * channel;
+          double data_value = data_im[data_index];
+          max_value = max ( max_value, data_value );
         }
       }
       *(data_pooled++) = max_value;
@@ -99,27 +104,27 @@ void pool_backwards_cpu(const double* data_im, int data_im_offset,
   for (int channel = 0; channel < channels; channel++) {
     for (int fitting_height = 0; fitting_height <= (height - kernel_h); fitting_height += stride_h) {
       for (int fitting_width = 0; fitting_width <= (width - kernel_w); fitting_width += stride_w) {
-        int max_index = fitting_height * width + fitting_width + channel_size * channel;
-        int max_value = data_im[max_index];
+        int max_index = fitting_height * width + fitting_width + channel_size * channel;
+        double max_value = data_im[max_index];
         for (int kernel_col = 1; kernel_col < kernel_w; kernel_col++) {
-          int input_row = fitting_height;
-          int input_col = fitting_width + kernel_col;
-          int data_index = input_row * width + input_col + channel_size * channel;
-          int data_value = data_im[data_index];
+          int input_row = fitting_height;
+          int input_col = fitting_width + kernel_col;
+          int data_index = input_row * width + input_col + channel_size * channel;
+          double data_value = data_im[data_index];
           if ( data_value > max_value ) {
-            max_value = data_value;
             max_index = data_index;
+            max_value = data_value;
           }
         }
         for (int kernel_row = 1; kernel_row < kernel_h; kernel_row++) {
           for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
-            int input_row = fitting_height + kernel_row;
-            int input_col = fitting_width + kernel_col;
-            int data_index = input_row * width + input_col + channel_size * channel;
-            int data_value = data_im[data_index];
+            int input_row = fitting_height + kernel_row;
+            int input_col = fitting_width + kernel_col;
+            int data_index = input_row * width + input_col + channel_size * channel;
+            double data_value = data_im[data_index];
             if ( data_value > max_value ) {
-              max_value = data_value;
               max_index = data_index;
+              max_value = data_value;
             }
           }
         }
@@ -106,11 +106,17 @@ test-suite test
                      base >= 4.8 && < 5
                    , grenade
                    , ambiata-disorder-core
                    , ambiata-disorder-jack
                    , hmatrix
                    , mtl
                    , singletons
                    , text == 1.2.*
                    , typelits-witnesses
                    , constraints
                    , QuickCheck >= 2.7 && < 2.9
                    , quickcheck-instances == 0.3.*
                    , MonadRandom
                    , random


benchmark bench
@@ -1 +1 @@
-Subproject commit 43d08f1b4b3e0d43aa3233b7b5a3ea785c1d357b
+Subproject commit c27c6df2f43178f9b20e911f9e5771183fdb724a
@@ -34,8 +34,8 @@ foreign import ccall unsafe
     :: Ptr Double -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Ptr Double -> IO ()
 
 poolBackward :: Int -> Int -> Int -> Int -> Int -> Int -> Int -> Matrix Double -> Matrix Double -> Matrix Double
-poolBackward channels height width kernelRows kernelColumns strideRows strideColumns dataCol dataGrad =
-  let vecIm   = flatten dataCol
+poolBackward channels height width kernelRows kernelColumns strideRows strideColumns dataIm dataGrad =
+  let vecIm   = flatten dataIm
       vecGrad = flatten dataGrad
   in unsafePerformIO $ do
     outPtr <- mallocForeignPtrArray0 (height * width * channels)
@@ -1,234 +1,95 @@
 {-# LANGUAGE TemplateHaskell #-}
 {-# LANGUAGE DataKinds #-}
 {-# LANGUAGE GADTs #-}
 {-# LANGUAGE ScopedTypeVariables #-}
 {-# LANGUAGE KindSignatures #-}
 {-# LANGUAGE ConstraintKinds #-}
 {-# LANGUAGE TypeOperators #-}
 
 {-# OPTIONS_GHC -fno-warn-missing-signatures #-}
 module Test.Grenade.Layers.Convolution where
 
 import Unsafe.Coerce
 import Data.Constraint
 import Data.Proxy
 import Data.Singletons ()
 import GHC.TypeLits
 import GHC.TypeLits.Witnesses
 
 import Grenade.Core.Shape
 import Grenade.Core.Network
 import Grenade.Layers.Convolution
 import Grenade.Layers.Internal.Convolution
 
 import Numeric.LinearAlgebra hiding (uniformSample, konst, (===))
 import qualified Numeric.LinearAlgebra.Static as HStatic
 
 import Test.QuickCheck hiding ((><))
 import Disorder.Jack
 
-prop_im2col_no_stride = once $
-  let input = (3><4)
-              [ 1.0,  2.0,  3.0,  4.0
-              , 5.0,  6.0,  7.0,  8.0
-              , 9.0, 10.0, 11.0, 12.0 ]
-      expected = (6><4)
-              [ 1.0, 2.0,  5.0,  6.0
-              , 2.0, 3.0,  6.0,  7.0
-              , 3.0, 4.0,  7.0,  8.0
-              , 5.0, 6.0,  9.0, 10.0
-              , 6.0, 7.0, 10.0, 11.0
-              , 7.0, 8.0, 11.0, 12.0 ]
-      out = im2col 2 2 1 1 input
-  in expected === out
+import Test.Jack.Hmatrix
 
-prop_im2col_stride = once $
-  let input = (3><4)
-              [ 1.0,  2.0,  3.0,  4.0
-              , 5.0,  6.0,  7.0,  8.0
-              , 9.0, 10.0, 11.0, 12.0 ]
-      expected = (4><4)
-              [ 1.0, 2.0,  5.0,  6.0
-              , 3.0, 4.0,  7.0,  8.0
-              , 5.0, 6.0,  9.0, 10.0
-              , 7.0, 8.0, 11.0, 12.0 ]
-      out = im2col 2 2 1 2 input
-  in expected === out
+data OpaqueConvolution :: * where
+  OpaqueConvolution :: Convolution channels filters kernelRows kernelColumns strideRows strideColumns -> OpaqueConvolution
 
-prop_im2col_other = once $
-  let input = (3><4)
-              [ 1.0,  2.0,  3.0,  4.0
-              , 5.0,  6.0,  7.0,  8.0
-              , 9.0, 10.0, 11.0, 12.0 ]
-      expected = (2><6)
-              [ 1.0, 2.0, 5.0, 6.0,  9.0, 10.0
-              , 3.0, 4.0, 7.0, 8.0, 11.0, 12.0 ]
-      out = im2col 3 2 1 2 input
-  in expected === out
+instance Show OpaqueConvolution where
+  show (OpaqueConvolution n) = show n
 
--- If there's no overlap (stride is the same size as the kernel)
--- then col2im . im2col should be symmetric.
-prop_im2col_sym_on_same_stride = once $
-  let input = (3><4)
-              [ 1.0,  2.0,  3.0,  4.0
-              , 5.0,  6.0,  7.0,  8.0
-              , 9.0, 10.0, 11.0, 12.0 ]
-      out = col2im 3 2 3 2 3 4 . im2col 3 2 3 2 $ input
-  in input === out
+genOpaqueOpaqueConvolution :: Jack OpaqueConvolution
+genOpaqueOpaqueConvolution = do
+  Just channels <- someNatVal <$> choose (1, 10)
+  Just filters  <- someNatVal <$> choose (1, 10)
+  Just kernel_h <- someNatVal <$> choose (2, 20)
+  Just kernel_w <- someNatVal <$> choose (2, 20)
+  Just stride_h <- someNatVal <$> choose (1, 10)
+  Just stride_w <- someNatVal <$> choose (1, 10)
+  case (channels, filters, kernel_h, kernel_w, stride_h, stride_w) of
+    ( SomeNat (pch :: Proxy ch), SomeNat (_ :: Proxy fl),
+      SomeNat (pkr :: Proxy kr), SomeNat (pkc :: Proxy kc),
+      SomeNat (_ :: Proxy sr),   SomeNat (_ :: Proxy sc)) ->
+      let p1 = natDict pkr
+          p2 = natDict pkc
+          p3 = natDict pch
+      in case p1 %* p2 %* p3 of
+        Dict -> OpaqueConvolution <$> (Convolution <$> uniformSample <*> uniformSample :: Jack (Convolution ch fl kr kc sr sc))
 
--- If there is an overlap, then the gradient passed back should be
--- the sum of the gradients across the filters.
-prop_im2col_col2im_additive = once $
-  let input = (3><4)
-              [ 1.0, 1.0, 1.0, 1.0
-              , 1.0, 1.0, 1.0, 1.0
-              , 1.0, 1.0, 1.0, 1.0 ]
-      expected = (3><4)
-              [ 1.0, 2.0, 2.0, 1.0
-              , 2.0, 4.0, 4.0, 2.0
-              , 1.0, 2.0, 2.0, 1.0 ]
-      out = col2im 2 2 1 1 3 4 . im2col 2 2 1 1 $ input
-  in expected === out
+prop_conv_net_witness =
+  gamble genOpaqueOpaqueConvolution $ \onet ->
+    (case onet of
+       (OpaqueConvolution ((Convolution _ _) :: Convolution channels filters kernelRows kernelCols strideRows strideCols)) -> True
+    ) :: Bool
 
-prop_simple_conv_forwards = once $
-  -- Create a convolution kernel with 4 filters.
-  -- [ 1, 0    [ 0, 1    [ 0, 1    [ 0, 0
-  -- , 0,-1 ]  ,-1, 0 ]  , 1, 0 ]  ,-1,-1 ]
-  let myKernel = HStatic.matrix
-                 [ 1.0,  0.0, 0.0,  0.0
-                 , 0.0,  1.0, 1.0,  0.0
-                 , 0.0, -1.0, 1.0, -1.0
-                 ,-1.0,  0.0, 0.0, -1.0 ] :: HStatic.L 4 4
-      zeroKernel = HStatic.matrix
-                 [ 0.0, 0.0, 0.0, 0.0
-                 , 0.0, 0.0, 0.0, 0.0
-                 , 0.0, 0.0, 0.0, 0.0
-                 , 0.0, 0.0, 0.0, 0.0 ] :: HStatic.L 4 4
+prop_conv_net =
+  gamble genOpaqueOpaqueConvolution $ \onet ->
+    (case onet of
+       (OpaqueConvolution (convLayer@(Convolution _ _) :: Convolution channels filters kernelRows kernelCols strideRows strideCols)) ->
+         let ok stride kernel = [extent | extent <- [(kernel + 1) .. 30], (extent - kernel) `mod` stride == 0]
+             ch = fromIntegral $ natVal (Proxy :: Proxy channels)
+             kr = fromIntegral $ natVal (Proxy :: Proxy kernelRows)
+             kc = fromIntegral $ natVal (Proxy :: Proxy kernelCols)
+             sr = fromIntegral $ natVal (Proxy :: Proxy strideRows)
+             sc = fromIntegral $ natVal (Proxy :: Proxy strideCols)
 
-      expectedGradient = HStatic.matrix
-                 [ 1.0, 0.0, 0.0, 2.0
-                 , 2.0, 0.0, 0.0, 5.0
-                 , 3.0, 0.0, 0.0, 4.0
-                 , 4.0, 0.0, 0.0, 6.0 ] :: HStatic.L 4 4
-
-      convLayer = Convolution myKernel zeroKernel :: Convolution 1 4 2 2 1 1
-
-      input = S2D' (HStatic.matrix
-                 [ 1.0, 2.0, 5.0
-                 , 3.0, 4.0, 6.0 ] :: HStatic.L 2 3)
-
-      expect = HStatic.matrix
-                 [ -3.0,  -4.0
-                 , -1.0,   1.0
-                 ,  5.0,   9.0
-                 , -7.0, -10.0 ] :: HStatic.L 4 2
-      out = runForwards convLayer input :: S' ('D3 1 2 4)
-
-      grad = S3D' (HStatic.matrix
-                 [ 1, 0
-                 , 0, 0
-                 , 0, 0
-                 , 0, 1 ] :: HStatic.L 4 2) :: S' ('D3 1 2 4)
-
-      expectBack = (HStatic.matrix
-                 [ 1.0,  0.0,  0.0
-                 , 0.0, -2.0, -1.0 ] :: HStatic.L 2 3)
-      (nc, inX) = runBackwards convLayer input grad
-
-  in case (out, inX, nc) of
-       (S3D' out', S2D' inX', Convolution' backGrad)
-         -> (HStatic.extract expect === HStatic.extract out')
-            .&&. (HStatic.extract expectBack === HStatic.extract inX')
-            .&&. (HStatic.extract expectedGradient === HStatic.extract backGrad)
-
-prop_vid2col_no_stride = once $
-  let input = (6><4)
-              [ 1.0,  2.0,  3.0,  4.0
-              , 5.0,  6.0,  7.0,  8.0
-              , 9.0, 10.0, 11.0, 12.0
-              -- -- --
-              , 21.0, 22.0, 23.0, 24.0
-              , 25.0, 26.0, 27.0, 28.0
-              , 29.0, 30.0, 31.0, 32.0 ]
-      expected = (6><8)
-              [ 1.0, 2.0,  5.0,  6.0, 21.0, 22.0, 25.0, 26.0
-              , 2.0, 3.0,  6.0,  7.0, 22.0, 23.0, 26.0, 27.0
-              , 3.0, 4.0,  7.0,  8.0, 23.0, 24.0, 27.0, 28.0
-              , 5.0, 6.0,  9.0, 10.0, 25.0, 26.0, 29.0, 30.0
-              , 6.0, 7.0, 10.0, 11.0, 26.0, 27.0, 30.0, 31.0
-              , 7.0, 8.0, 11.0, 12.0, 27.0, 28.0, 31.0, 32.0 ]
-      out_c = vid2col 2 2 1 1 3 4 input
-  in expected === out_c
-
-prop_vid2col_stride = once $
-  let input = (6><4)
-              [ 1.0,  2.0,  3.0,  4.0
-              , 5.0,  6.0,  7.0,  8.0
-              , 9.0, 10.0, 11.0, 12.0
-              -- -- -- -- --
-              , 21.0, 22.0, 23.0, 24.0
-              , 25.0, 26.0, 27.0, 28.0
-              , 29.0, 30.0, 31.0, 32.0 ]
-      expected = (4><8)
-              [ 1.0, 2.0,  5.0,  6.0, 21.0, 22.0, 25.0, 26.0
-              , 3.0, 4.0,  7.0,  8.0, 23.0, 24.0, 27.0, 28.0
-              , 5.0, 6.0,  9.0, 10.0, 25.0, 26.0, 29.0, 30.0
-              , 7.0, 8.0, 11.0, 12.0, 27.0, 28.0, 31.0, 32.0 ]
-      out_c = vid2col 2 2 1 2 3 4 input
-  in expected === out_c
-
-prop_vid2col_invert = once $
-  let input = (6><4)
-              [ 1.0,  2.0,  3.0,  4.0
-              , 5.0,  6.0,  7.0,  8.0
-              , 9.0, 10.0, 11.0, 12.0
-              -- -- -- -- --
-              , 21.0, 22.0, 23.0, 24.0
-              , 25.0, 26.0, 27.0, 28.0
-              , 29.0, 30.0, 31.0, 32.0 ]
-      out = col2vid 3 2 3 2 3 4 . vid2col 3 2 3 2 3 4 $ input
-  in input === out
-
--- This test shows that 2D convs act the same as
--- 3D convs with one layer.
-prop_single_conv_forwards = once $
-  -- Create a convolution kernel with 4 filters.
-  -- [ 1, 0    [ 0, 1    [ 0, 1    [ 0, 0
-  -- , 0,-1 ]  ,-1, 0 ]  , 1, 0 ]  ,-1,-1 ]
-  let myKernel = (HStatic.matrix
-                 [ 1.0,  0.0, 0.0,  0.0
-                 , 0.0,  1.0, 1.0,  0.0
-                 , 0.0, -1.0, 1.0, -1.0
-                 ,-1.0,  0.0, 0.0, -1.0 ] :: HStatic.L 4 4)
-      zeroKernel = (HStatic.matrix
-                 [ 0.0, 0.0, 0.0, 0.0
-                 , 0.0, 0.0, 0.0, 0.0
-                 , 0.0, 0.0, 0.0, 0.0
-                 , 0.0, 0.0, 0.0, 0.0 ] :: HStatic.L 4 4)
-
-      expectedGradient = (HStatic.matrix
-                 [ 1.0, 0.0, 0.0, 2.0
-                 , 2.0, 0.0, 0.0, 5.0
-                 , 3.0, 0.0, 0.0, 4.0
-                 , 4.0, 0.0, 0.0, 6.0 ] :: HStatic.L 4 4)
-
-      convLayer = Convolution myKernel zeroKernel :: Convolution 1 4 2 2 1 1
-
-      input = S3D' (HStatic.matrix
-                 [ 1.0, 2.0, 5.0
-                 , 3.0, 4.0, 6.0 ] :: HStatic.L 2 3) :: S' ('D3 2 3 1)
-
-      expect = HStatic.matrix
-                 [ -3.0,  -4.0
-                 , -1.0,   1.0
-                 ,  5.0,   9.0
-                 , -7.0, -10.0 ] :: HStatic.L 4 2
-      out = runForwards convLayer input :: S' ('D3 1 2 4)
-
-      grad = S3D' (HStatic.matrix
-                 [ 1, 0
-                 , 0, 0
-                 , 0, 0
-                 , 0, 1 ] :: HStatic.L 4 2) :: S' ('D3 1 2 4)
-
-      expectBack = (HStatic.matrix
-                 [ 1.0,  0.0,  0.0
-                 , 0.0, -2.0, -1.0 ] :: HStatic.L 2 3)
-      (nc, inX) = runBackwards convLayer input grad
-
-  in case (out, inX, nc) of
-       (S3D' out', S3D' inX', Convolution' backGrad)
-         -> (HStatic.extract expect === HStatic.extract out')
-            .&&. (HStatic.extract expectBack === HStatic.extract inX')
-            .&&. (HStatic.extract expectedGradient === HStatic.extract backGrad)
+         in gamble (elements (ok sr kr)) $ \er ->
+            gamble (elements (ok sc kc)) $ \ec ->
+              let i   = fromIntegral (er * ec * ch)
+                  rr  = ((er - kr) `div` sr) + 1
+                  rc  = ((ec - kc) `div` sc) + 1
+                  er' = someNatVal er
+                  ec' = someNatVal ec
+                  rr' = someNatVal rr
+                  rc' = someNatVal rc
+              in gamble (vectorOf i sizedRealFrac) $ \(input :: [Double]) ->
+                   case (er', ec', rr', rc') of
+                     (Just (SomeNat (pinr :: Proxy inRows)), Just (SomeNat (_ :: Proxy inCols)), Just (SomeNat (pour :: Proxy outRows)), Just (SomeNat (_ :: Proxy outCols))) ->
+                       let p1 = natDict pinr
+                           p2 = natDict pour
+                       in case ( p1 %* natDict (Proxy :: Proxy channels)
+                               , p2 %* natDict (Proxy :: Proxy filters)
+                               -- Fake it till you make it.
+                               , (unsafeCoerce (Dict :: Dict ()) :: Dict (((outRows - 1) * strideRows) ~ (inRows - kernelRows)))
+                               , (unsafeCoerce (Dict :: Dict ()) :: Dict (((outCols - 1) * strideCols) ~ (inCols - kernelCols)))) of
+                           (Dict, Dict, Dict, Dict) ->
+                             let x :: S' ('D3 outRows outCols filters) = runForwards convLayer ((S3D' (HStatic.matrix input)) :: S' ('D3 inRows inCols channels))
+                             in x `seq` True
+                     _ -> False
+    ) :: Property
 
 return []
 tests :: IO Bool
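
The generator above is what the commit message calls a skolem layer generator: someNatVal lifts a runtime Integer to a type-level Nat, and the existentially bound type variables are then used to build a layer at that size, hidden behind OpaqueConvolution. A stripped-down sketch of the pattern (hypothetical names; standard GHC.TypeLits API):

{-# LANGUAGE DataKinds #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE ScopedTypeVariables #-}
import Data.Proxy (Proxy (..))
import GHC.TypeLits (KnownNat, SomeNat (..), natVal, someNatVal)

-- A value whose size lives at the type level but is hidden from callers,
-- much as OpaqueConvolution hides its six Nat parameters.
data SomeSized where
  SomeSized :: KnownNat n => Proxy n -> SomeSized

-- Lift a runtime Integer to a skolem type variable n.
fromRuntime :: Integer -> Maybe SomeSized
fromRuntime i = case someNatVal i of
  Just (SomeNat (p :: Proxy n)) -> Just (SomeSized p)
  Nothing                       -> Nothing

main :: IO ()
main = case fromRuntime 5 of
  Just (SomeSized p) -> print (natVal p) -- prints 5
  Nothing            -> putStrLn "negative size"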
test/Test/Grenade/Layers/FullyConnected.hs (new file, 57 lines)
@@ -0,0 +1,57 @@
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE KindSignatures #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# OPTIONS_GHC -fno-warn-missing-signatures #-}
module Test.Grenade.Layers.FullyConnected where

import Data.Proxy
import Data.Singletons ()

import GHC.TypeLits

import Grenade.Core.Shape
import Grenade.Core.Network
import Grenade.Layers.FullyConnected

import qualified Numeric.LinearAlgebra.Static as HStatic

import Disorder.Jack

import Test.Jack.Hmatrix


data OpaqueFullyConnected :: * where
  OpaqueFullyConnected :: (KnownNat i, KnownNat o) => FullyConnected i o -> OpaqueFullyConnected

instance Show OpaqueFullyConnected where
  show (OpaqueFullyConnected n) = show n

genOpaqueFullyConnected :: Jack OpaqueFullyConnected
genOpaqueFullyConnected = do
  input  :: Integer <- choose (2, 100)
  output :: Integer <- choose (2, 100)
  let Just input'  = someNatVal input
  let Just output' = someNatVal output
  case (input', output') of
    (SomeNat (Proxy :: Proxy i'), SomeNat (Proxy :: Proxy o')) -> do
      wB <- randomVector
      bM <- randomVector
      wN <- uniformSample
      kM <- uniformSample
      return . OpaqueFullyConnected $ (FullyConnected wB bM wN kM :: FullyConnected i' o')

prop_fully_connected_forwards :: Property
prop_fully_connected_forwards =
  gamble genOpaqueFullyConnected $ \(OpaqueFullyConnected (fclayer :: FullyConnected i o)) ->
    let i = fromIntegral $ natVal (Proxy :: Proxy i)
    in gamble (vectorOf i sizedRealFrac) $ \input ->
         let x :: S' ('D1 o) = runForwards fclayer (S1D' (HStatic.vector input :: HStatic.R i))
         in x `seq` True


return []
tests :: IO Bool
tests = $quickCheckAll
test/Test/Grenade/Layers/Internal/Convolution.hs (new file, 54 lines)
@@ -0,0 +1,54 @@
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE TypeOperators #-}

{-# OPTIONS_GHC -fno-warn-missing-signatures #-}
module Test.Grenade.Layers.Internal.Convolution where

-- import Control.Monad.Random

import Grenade.Layers.Internal.Convolution

import Numeric.LinearAlgebra hiding (uniformSample, konst, (===))

import Disorder.Jack

import qualified Test.Grenade.Layers.Internal.Reference as Reference

prop_im2col_col2im_symmetrical_with_kernel_stride =
  let factors n = [x | x <- [1..n], n `mod` x == 0]
  in gamble (choose (2, 100)) $ \height ->
     gamble (choose (2, 100)) $ \width ->
     gamble ((height `div`) <$> elements (factors height)) $ \kernel_h ->
     gamble ((width `div`) <$> elements (factors width)) $ \kernel_w ->
     gamble (listOfN (height * width) (height * width) sizedRealFrac) $ \input ->
       let input'   = (height >< width) input
           stride_h = kernel_h
           stride_w = kernel_w
           out      = col2im kernel_h kernel_w stride_h stride_w height width . im2col kernel_h kernel_w stride_h stride_w $ input'
       in input' === out

prop_im2col_col2im_behaves_as_reference =
  let ok extent kernel = [stride | stride <- [1..extent], (extent - kernel) `mod` stride == 0]
  in gamble (choose (2, 100)) $ \height ->
     gamble (choose (2, 100)) $ \width ->
     gamble (choose (2, height - 1)) $ \kernel_h ->
     gamble (choose (2, width - 1)) $ \kernel_w ->
     gamble (elements (ok height kernel_h)) $ \stride_h ->
     gamble (elements (ok width kernel_w)) $ \stride_w ->
     gamble (listOfN (height * width) (height * width) sizedRealFrac) $ \input ->
       let input'  = (height >< width) input
           outFast = im2col kernel_h kernel_w stride_h stride_w input'
           retFast = col2im kernel_h kernel_w stride_h stride_w height width outFast

           outReference = Reference.im2col kernel_h kernel_w stride_h stride_w input'
           retReference = Reference.col2im kernel_h kernel_w stride_h stride_w height width outReference
       in outFast === outReference .&&. retFast === retReference


return []
tests :: IO Bool
tests = $quickCheckAll
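
A design note on the symmetry property above: drawing the kernel as the extent divided by one of its factors guarantees the kernel size divides the extent, so with stride equal to kernel the windows tile the matrix exactly and col2im can undo im2col. A hand check, assuming height = 12:

-- factors 12                == [1,2,3,4,6,12]
-- (12 `div`) <$> factors 12 == [12,6,4,3,2,1]   -- every candidate kernel divides 12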
test/Test/Grenade/Layers/Internal/Pooling.hs (new file, 59 lines)
@@ -0,0 +1,59 @@
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE TypeOperators #-}

{-# OPTIONS_GHC -fno-warn-missing-signatures #-}
module Test.Grenade.Layers.Internal.Pooling where

import Grenade.Layers.Internal.Pooling

import Numeric.LinearAlgebra hiding (uniformSample, konst, (===))

import Disorder.Jack

import qualified Test.Grenade.Layers.Internal.Reference as Reference

prop_poolForwards_poolBackwards_behaves_as_reference =
  let ok extent kernel = [stride | stride <- [1..extent], (extent - kernel) `mod` stride == 0]
      output extent kernel stride = (extent - kernel) `div` stride + 1
  in gamble (choose (2, 100)) $ \height ->
     gamble (choose (2, 100)) $ \width ->
     gamble (choose (2, height - 1)) $ \kernel_h ->
     gamble (choose (2, width - 1)) $ \kernel_w ->
     gamble (elements (ok height kernel_h)) $ \stride_h ->
     gamble (elements (ok width kernel_w)) $ \stride_w ->
     gamble (listOfN (height * width) (height * width) sizedRealFrac) $ \input ->
       let input'  = (height >< width) input
           outFast = poolForward 1 height width kernel_h kernel_w stride_h stride_w input'
           -- retFast = poolBackward 1 height width kernel_h kernel_w stride_h stride_w input' outFast

           outReference = Reference.poolForward kernel_h kernel_w stride_h stride_w (output height kernel_h stride_h) (output width kernel_w stride_w) input'
           -- retReference = Reference.poolBackward kernel_h kernel_w stride_h stride_w input' outReference
       in outFast === outReference -- .&&. retFast === retReference


prop_poolForwards_poolBackwards_symmetry =
  let factors n = [x | x <- [1..n], n `mod` x == 0]
      output extent kernel stride = (extent - kernel) `div` stride + 1
  in gamble (choose (2, 100)) $ \height ->
     gamble (choose (2, 100)) $ \width ->
     gamble ((height `div`) <$> elements (factors height)) $ \kernel_h ->
     gamble ((width `div`) <$> elements (factors width)) $ \kernel_w ->
     gamble (listOfN (height * width) (height * width) sizedRealFrac) $ \input ->
       let input'   = (height >< width) input
           stride_h = kernel_h
           stride_w = kernel_w
           outFast  = poolForward 1 height width kernel_h kernel_w stride_h stride_w input'
           retFast  = poolBackward 1 height width kernel_h kernel_w stride_h stride_w input' outFast

           outReference = Reference.poolForward kernel_h kernel_w stride_h stride_w (output height kernel_h stride_h) (output width kernel_w stride_w) input'
           retReference = Reference.poolBackward kernel_h kernel_w stride_h stride_w input' outReference
       in outFast === outReference .&&. retFast === retReference


return []
tests :: IO Bool
tests = $quickCheckAll
test/Test/Grenade/Layers/Internal/Reference.hs (new file, 129 lines)
@@ -0,0 +1,129 @@
module Test.Grenade.Layers.Internal.Reference where

import Control.Monad.ST ( ST )

import Data.Foldable ( forM_ )
import Data.Function ( on )
import Data.List ( maximumBy )

import Numeric.LinearAlgebra
import qualified Numeric.LinearAlgebra.Devel as U

im2col :: Int -> Int -> Int -> Int -> Matrix Double -> Matrix Double
im2col nrows ncols srows scols m =
  let starts = fittingStarts (rows m) nrows srows (cols m) ncols scols
  in im2colFit starts nrows ncols m

vid2col :: Int -> Int -> Int -> Int -> Int -> Int -> [Matrix Double] -> Matrix Double
vid2col nrows ncols srows scols inputrows inputcols ms =
  let starts = fittingStarts inputrows nrows srows inputcols ncols scols
      subs   = fmap (im2colFit starts nrows ncols) ms
  in foldl1 (|||) subs

im2colFit :: [(Int,Int)] -> Int -> Int -> Matrix Double -> Matrix Double
im2colFit starts nrows ncols m =
  let imRows = fmap (\start -> flatten $ subMatrix start (nrows, ncols) m) starts
  in fromRows imRows

col2vid :: Int -> Int -> Int -> Int -> Int -> Int -> Matrix Double -> [Matrix Double]
col2vid nrows ncols srows scols drows dcols m =
  let starts = fittingStart (cols m) (nrows * ncols) (nrows * ncols)
      r      = rows m
      mats   = fmap (\s -> subMatrix (0,s) (r, nrows * ncols) m) starts
      colSts = fittingStarts drows nrows srows dcols ncols scols
  in fmap (col2imfit colSts nrows ncols drows dcols) mats

col2im :: Int -> Int -> Int -> Int -> Int -> Int -> Matrix Double -> Matrix Double
col2im krows kcols srows scols drows dcols m =
  let starts = fittingStarts drows krows srows dcols kcols scols
  in col2imfit starts krows kcols drows dcols m

col2imfit :: [(Int,Int)] -> Int -> Int -> Int -> Int -> Matrix Double -> Matrix Double
col2imfit starts krows kcols drows dcols m =
  let indicies = (\[a,b] -> (a,b)) <$> sequence [[0..(krows-1)], [0..(kcols-1)]]
      convs    = fmap (zip indicies . toList) . toRows $ m
      pairs    = zip convs starts
      accums   = concatMap (\(conv',(stx',sty')) -> fmap (\((ix,iy), val) -> ((ix + stx', iy + sty'), val)) conv') pairs
  in accum (konst 0 (drows, dcols)) (+) accums


poolForward :: Int -> Int -> Int -> Int -> Int -> Int -> Matrix Double -> Matrix Double
poolForward nrows ncols srows scols outputRows outputCols m =
  let starts = fittingStarts (rows m) nrows srows (cols m) ncols scols
  in poolForwardFit starts nrows ncols outputRows outputCols m

poolForwardList :: Functor f => Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> f (Matrix Double) -> f (Matrix Double)
poolForwardList nrows ncols srows scols inRows inCols outputRows outputCols ms =
  let starts = fittingStarts inRows nrows srows inCols ncols scols
  in poolForwardFit starts nrows ncols outputRows outputCols <$> ms

poolForwardFit :: [(Int,Int)] -> Int -> Int -> Int -> Int -> Matrix Double -> Matrix Double
poolForwardFit starts nrows ncols _ outputCols m =
  let els = fmap (\start -> unsafeMaxElementSubmatrix start (nrows, ncols) m) starts
  in matrix outputCols els

poolBackward :: Int -> Int -> Int -> Int -> Matrix Double -> Matrix Double -> Matrix Double
poolBackward krows kcols srows scols inputMatrix gradientMatrix =
  let inRows = rows inputMatrix
      inCols = cols inputMatrix
      starts = fittingStarts inRows krows srows inCols kcols scols
  in poolBackwardFit starts krows kcols inputMatrix gradientMatrix

poolBackwardList :: Functor f => Int -> Int -> Int -> Int -> Int -> Int -> f (Matrix Double, Matrix Double) -> f (Matrix Double)
poolBackwardList krows kcols srows scols inRows inCols inputMatrices =
  let starts = fittingStarts inRows krows srows inCols kcols scols
  in uncurry (poolBackwardFit starts krows kcols) <$> inputMatrices

poolBackwardFit :: [(Int,Int)] -> Int -> Int -> Matrix Double -> Matrix Double -> Matrix Double
poolBackwardFit starts krows kcols inputMatrix gradientMatrix = U.runSTMatrix $ do
  let inRows  = rows inputMatrix
      inCols  = cols inputMatrix
      gradCol = cols gradientMatrix
      extent  = (krows, kcols)

  retM <- U.newMatrix 0 inRows inCols

  forM_ (zip [0..] starts) $ \(ix, start) -> do
    let loc = unsafeMaxIndexSubMatrix start extent inputMatrix
    uncurry (unsafeModifyMatrix retM) loc ((+) $ uncurry (U.atM' gradientMatrix) $ divMod ix gradCol)

  return retM

unsafeMaxElementSubmatrix :: (Int,Int) -> (Int,Int) -> Matrix Double -> Double
unsafeMaxElementSubmatrix starts extent m = uncurry (U.atM' m) $ unsafeMaxIndexSubMatrix starts extent m

unsafeMaxIndexSubMatrix :: (Int,Int) -> (Int,Int) -> Matrix Double -> (Int, Int)
unsafeMaxIndexSubMatrix (startRow, startCol) (extentRow, extentCold) m =
  let mrows = [startRow .. startRow + extentRow - 1]
      mcols = [startCol .. startCol + extentCold - 1]
      pairs = concatMap ( \r -> fmap (\c -> (r , c)) mcols ) mrows
  in maximumBy (compare `on` uncurry (U.atM' m)) pairs

unsafeModifyMatrix :: U.STMatrix s Double -> Int -> Int -> (Double -> Double) -> ST s ()
unsafeModifyMatrix x r c f = U.unsafeReadMatrix x r c >>= U.unsafeWriteMatrix x r c . f

-- | These functions are not even remotely safe, but it's only called from the statically typed
--   commands, so we should be good ?!?!?
--   Returns the starting sub matrix locations which fit inside the larger matrix for the
--   convolution. Takes into account the stride and kernel size.
fittingStarts :: Int -> Int -> Int -> Int -> Int -> Int -> [(Int,Int)]
fittingStarts nrows kernelrows steprows ncols kernelcols stepcolsh =
  let rs = fittingStart nrows kernelrows steprows
      cs = fittingStart ncols kernelcols stepcolsh
      ls = sequence [rs, cs]
  in fmap (\[a,b] -> (a,b)) ls

-- | Returns the starting sub vector which fit inside the larger vector for the
--   convolution. Takes into account the stride and kernel size.
fittingStart :: Int -> Int -> Int -> [Int]
fittingStart width kernel steps =
  let go left | left + kernel < width
              = left : go (left + steps)
              | left + kernel == width
              = [left]
              | otherwise
              = []
  in go 0
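
To make the window geometry concrete, here is a small worked example, computed by hand from the definitions above, for a 3x4 input with a 2x2 kernel and stride 1 in both directions:

-- fittingStart 3 2 1 == [0,1]     -- row offsets where a height-2 kernel fits
-- fittingStart 4 2 1 == [0,1,2]   -- column offsets where a width-2 kernel fits
-- fittingStarts 3 2 1 4 2 1 == [(0,0),(0,1),(0,2),(1,0),(1,1),(1,2)]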
@@ -1,82 +1,43 @@
 {-# LANGUAGE TemplateHaskell #-}
-{-# LANGUAGE DataKinds #-}
-{-# LANGUAGE GADTs #-}
+{-# LANGUAGE DataKinds #-}
+{-# LANGUAGE KindSignatures #-}
+{-# LANGUAGE GADTs #-}
 {-# LANGUAGE ScopedTypeVariables #-}
 {-# OPTIONS_GHC -fno-warn-missing-signatures #-}
 module Test.Grenade.Layers.Pooling where
 
-import Grenade.Layers.Internal.Pooling
+import Data.Proxy
+import Data.Singletons ()
 
-import Numeric.LinearAlgebra hiding (uniformSample, konst, (===))
+import GHC.TypeLits
+import Grenade.Layers.Pooling
 
-import Test.QuickCheck hiding ((><))
 import Disorder.Jack
 
-prop_pool = once $
-  let input = (3><4)
-              [ 1.0, 14.0,  3.0,  4.0
-              , 5.0,  6.0,  7.0,  8.0
-              , 9.0, 10.0, 11.0, 12.0 ]
-      expected = (2><3)
-              [ 14.0, 14.0,  8.0
-              , 10.0, 11.0, 12.0 ]
-      out = poolForward 1 3 4 2 2 1 1 input
-  in expected === out
-
-prop_pool_rectangular = once $
-  let input = (3><4)
-              [ 1.0, 14.0,  3.0,  4.0
-              , 5.0,  6.0,  7.0,  8.0
-              , 9.0, 10.0, 11.0, 12.0 ]
-      expected = (2><2)
-              [ 14.0, 14.0
-              , 11.0, 12.0 ]
-      out = poolForward 1 3 4 2 3 1 1 input
-  in expected === out
+data OpaquePooling :: * where
+  OpaquePooling :: (KnownNat kh, KnownNat kw, KnownNat sh, KnownNat sw) => Pooling kh kw sh sw -> OpaquePooling
 
-prop_pool_channels = once $
-  let input = (6><4)
-              [ 1.0, 14.0,  3.0,  4.0
-              , 5.0,  6.0,  7.0,  8.0
-              , 9.0, 10.0, 11.0, 12.0
-              , 1.0,  2.0,  3.0,  4.0
-              , 5.0,  6.0,  7.0,  8.0
-              , 9.0, 10.0, 11.0, 12.0 ]
-      expected = (4><2)
-              [ 14.0, 14.0
-              , 11.0, 12.0
-              ,  7.0,  8.0
-              , 11.0, 12.0 ]
-      out = poolForward 2 3 4 2 3 1 1 input
-  in expected === out
+instance Show OpaquePooling where
+  show (OpaquePooling n) = show n
 
-prop_pool_backwards = once $
-  let input = (3><4)
-              [ 1.0,  2.0,  3.0,  4.0
-              , 5.0,  6.0,  7.0,  8.0
-              , 9.0, 10.0, 11.0, 12.0 ]
-      grads = (2><3)
-              [  -6.0,  -7.0,  -8.0
-              , -10.0, -11.0, -12.0 ]
-      expected = (3><4)
-              [ 0.0,   0.0,   0.0,   0.0
-              , 0.0,  -6.0,  -7.0,  -8.0
-              , 0.0, -10.0, -11.0, -12.0 ]
-      out = poolBackward 1 3 4 2 2 1 1 input grads
-  in expected === out
+genOpaquePooling :: Jack OpaquePooling
+genOpaquePooling = do
+  Just kernelHeight <- someNatVal <$> choose (2, 15)
+  Just kernelWidth  <- someNatVal <$> choose (2, 15)
+  Just strideHeight <- someNatVal <$> choose (2, 15)
+  Just strideWidth  <- someNatVal <$> choose (2, 15)
+
+  case (kernelHeight, kernelWidth, strideHeight, strideWidth) of
+    (SomeNat (_ :: Proxy kh), SomeNat (_ :: Proxy kw), SomeNat (_ :: Proxy sh), SomeNat (_ :: Proxy sw)) ->
+      return $ OpaquePooling (Pooling :: Pooling kh kw sh sw)
 
+prop_pool_layer_witness =
+  gamble genOpaquePooling $ \onet ->
+    (case onet of
+       (OpaquePooling (Pooling :: Pooling kernelRows kernelCols strideRows strideCols)) -> True
+    ) :: Bool
 
-prop_pool_backwards_additive = once $
-  let input = (3><4)
-              [ 4.0, 2.0, 3.0, 4.0
-              , 0.0, 0.0, 7.0, 8.0
-              , 9.0, 0.0, 0.0, 0.0 ]
-      grads = (2><3)
-              [  -6.0,  -7.0,  -8.0
-              , -10.0, -11.0, -12.0 ]
-      expected = (3><4)
-              [ -6.0, 0.0,   0.0,   0.0
-              ,  0.0, 0.0, -18.0, -20.0
-              ,-10.0, 0.0,   0.0,   0.0 ]
-      out = poolBackward 1 3 4 2 2 1 1 input grads
-  in expected === out
 
 return []
 tests :: IO Bool
test/Test/Jack/Hmatrix.hs (new file, 20 lines)
@@ -0,0 +1,20 @@
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE ScopedTypeVariables #-}

module Test.Jack.Hmatrix where

import Data.Proxy
import Disorder.Jack

import GHC.TypeLits

import qualified Numeric.LinearAlgebra.Static as HStatic

randomVector :: forall n. KnownNat n => Jack (HStatic.R n)
randomVector = HStatic.fromList <$> vectorOf (fromInteger (natVal (Proxy :: Proxy n))) sizedRealFrac

uniformSample :: forall m n. (KnownNat m, KnownNat n) => Jack (HStatic.L m n)
uniformSample = HStatic.fromList
            <$> vectorOf (fromInteger (natVal (Proxy :: Proxy m)) * fromInteger (natVal (Proxy :: Proxy n)))
                         sizedRealFrac
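
A hypothetical usage sketch (not part of this commit): since the element counts come from the KnownNat instances, instantiating the generators at a concrete type is all that is needed to fix the sizes:

-- The types alone pick the sizes: 3 * 2 = 6 draws for the matrix
-- and 3 draws for the vector.
genWeights :: Jack (HStatic.L 3 2, HStatic.R 3)
genWeights = (,) <$> uniformSample <*> randomVector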
test/test.hs (13 changed lines)
@@ -1,11 +1,20 @@
 import Disorder.Core.Main
 
-import qualified Test.Grenade.Layers.Pooling as Test.Grenade.Layers.Pooling
-import qualified Test.Grenade.Layers.Convolution as Test.Grenade.Layers.Convolution
+import qualified Test.Grenade.Layers.Pooling as Test.Grenade.Layers.Pooling
+import qualified Test.Grenade.Layers.Convolution as Test.Grenade.Layers.Convolution
+import qualified Test.Grenade.Layers.FullyConnected as Test.Grenade.Layers.FullyConnected
+
+import qualified Test.Grenade.Layers.Internal.Convolution as Test.Grenade.Layers.Internal.Convolution
+import qualified Test.Grenade.Layers.Internal.Pooling as Test.Grenade.Layers.Internal.Pooling
 
 
 main :: IO ()
 main =
   disorderMain [
       Test.Grenade.Layers.Pooling.tests
     , Test.Grenade.Layers.Convolution.tests
+    , Test.Grenade.Layers.FullyConnected.tests
+
+    , Test.Grenade.Layers.Internal.Convolution.tests
+    , Test.Grenade.Layers.Internal.Pooling.tests
     ]