Spelling: decend/decent -> descend/descent

Erik de Castro Lopo 2017-09-25 19:56:42 +10:00
parent 8e3e7dbed6
commit 9fd2111122
10 changed files with 29 additions and 29 deletions

View File

@@ -27,8 +27,8 @@ main = do
 , bench "backwards-60" $ nf (nfT3 . uncurry4 (testRun60' layer60)) (rec60, input40, rec60, rec60)
 , bench "backwards-512" $ nf (nfT3 . uncurry4 (testRun512' layer512)) (rec512, input40, rec512, rec512)
 ]
-, bgroup "update" [ bench "matrix-60x60" $ nf (uncurry3 (decendVector 1 1 1)) (upIn60, upIn60, upIn60)
-, bench "matrix-512x512" $ nf (uncurry3 (decendVector 1 1 1)) (upIn512, upIn512, upIn512)
+, bgroup "update" [ bench "matrix-60x60" $ nf (uncurry3 (descendVector 1 1 1)) (upIn60, upIn60, upIn60)
+, bench "matrix-512x512" $ nf (uncurry3 (descendVector 1 1 1)) (upIn512, upIn512, upIn512)
 ]
 , bgroup "train" [ bench "one-time-step" $ whnf (nfT2 . trainRecurrent lp lstm 0) [(input40, Just input40)]
 , bench "ten-time-steps" $ whnf (nfT2 . trainRecurrent lp lstm 0) $ replicate 10 (input40, Just input40)

View File

@@ -1,6 +1,6 @@
-#include "gradient_decent.h"
+#include "gradient_descent.h"
-void decend_cpu(int len, double rate, double momentum, double regulariser,
+void descend_cpu(int len, double rate, double momentum, double regulariser,
 const double* weights,
 const double* gradient,
 const double* last,

View File

@@ -1,7 +1,7 @@
 #include <stdio.h>
 #include <stdint.h>
-void decend_cpu(int len, double rate, double momentum, double regulariser,
+void descend_cpu(int len, double rate, double momentum, double regulariser,
 const double* weights,
 const double* gradient,
 const double* last,
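The two files above only carry the renamed declaration; the body of the kernel is not part of this patch. For orientation, here is a minimal pure Haskell sketch of the classical momentum step with L2 regularisation that a routine with this signature is typically expected to perform, element by element over the len entries; the exact formula used by descend_cpu is an assumption here, not taken from the diff.

-- Hypothetical per-element reference for what descend_cpu computes.
descendStep :: Double -> Double -> Double  -- rate, momentum, regulariser
            -> Double -> Double -> Double  -- weight, gradient, last update
            -> (Double, Double)            -- (new weight, new momentum)
descendStep rate mom reg w g lastU =
  let v  = mom * lastU - rate * g   -- momentum-smoothed step
      w' = w + v - rate * reg * w   -- take the step, then apply L2 weight decay
  in (w', v)

The C routine would then be the loop that applies such a step at each index, writing the results into the two output arrays passed after last.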

View File

@@ -17,7 +17,7 @@ description:
 Grenade provides an API for composing layers of a neural network
 into a sequence parallel graph in a type safe manner; running
 networks with reverse automatic differentiation to calculate their
-gradients; and applying gradient decent for learning.
+gradients; and applying gradient descent for learning.
 .
 Documentation and examples are available on github
 <https://github.com/HuwCampbell/grenade>.
@@ -26,8 +26,8 @@ extra-source-files:
 README.md
 cbits/im2col.h
 cbits/im2col.c
-cbits/gradient_decent.h
-cbits/gradient_decent.c
+cbits/gradient_descent.h
+cbits/gradient_descent.c
 cbits/pad.h
 cbits/pad.c
@@ -108,10 +108,10 @@ library
 Grenade.Utils.OneHot
 includes: cbits/im2col.h
-cbits/gradient_decent.h
+cbits/gradient_descent.h
 cbits/pad.h
 c-sources: cbits/im2col.c
-cbits/gradient_decent.c
+cbits/gradient_descent.c
 cbits/pad.c
 cc-options: -std=c99 -O3 -msse4.2 -Wall -Werror -DCABAL=1
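The description hunk above is the only prose this patch touches. For readers new to Grenade, the type-safe composition it mentions looks roughly like the following; the layer names and shape indices are taken from Grenade's own README-style examples rather than from this commit, so treat them as illustrative:

{-# LANGUAGE DataKinds #-}
import Grenade

-- A feed-forward network whose layer list and shape list are checked against
-- each other by the type system; mismatched dimensions fail to compile.
type FFNet
  = Network
      '[ FullyConnected 2 40, Tanh, FullyConnected 40 10, Relu, FullyConnected 10 1, Logit ]
      '[ 'D1 2, 'D1 40, 'D1 40, 'D1 10, 'D1 10, 'D1 1, 'D1 1 ]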

View File

@@ -139,7 +139,7 @@ runGradient net tapes o =
 = (GNil, o)
--- | Apply one step of stochastic gradient decent across the network.
+-- | Apply one step of stochastic gradient descent across the network.
 applyUpdate :: LearningParameters
 -> Network layers shapes
 -> Gradients layers
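The hunk above only renames the Haddock comment; the signature beneath it continues past the excerpt. For context, the traversal that applyUpdate performs pairs each layer with its gradient and delegates to that layer's runUpdate, roughly as in this sketch (the constructor names (:~>)/NNil for networks and (:/>)/GNil for gradient lists are assumed from Grenade's core types, not shown in this diff):

applyUpdate lp (layer :~> rest) (gradient :/> gradients) =
  runUpdate lp layer gradient :~> applyUpdate lp rest gradients
applyUpdate _ NNil GNil =
  NNil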

View File

@@ -131,7 +131,7 @@ instance ( KnownNat channels
 ) => UpdateLayer (Convolution channels filters kernelRows kernelColumns strideRows strideColumns) where
 type Gradient (Convolution channels filters kernelRows kernelCols strideRows strideCols) = (Convolution' channels filters kernelRows kernelCols strideRows strideCols)
 runUpdate LearningParameters {..} (Convolution oldKernel oldMomentum) (Convolution' kernelGradient) =
-let (newKernel, newMomentum) = decendMatrix learningRate learningMomentum learningRegulariser oldKernel kernelGradient oldMomentum
+let (newKernel, newMomentum) = descendMatrix learningRate learningMomentum learningRegulariser oldKernel kernelGradient oldMomentum
 in Convolution newKernel newMomentum
 createRandom = randomConvolution

View File

@@ -130,7 +130,7 @@ instance ( KnownNat channels
 ) => UpdateLayer (Deconvolution channels filters kernelRows kernelColumns strideRows strideColumns) where
 type Gradient (Deconvolution channels filters kernelRows kernelCols strideRows strideCols) = (Deconvolution' channels filters kernelRows kernelCols strideRows strideCols)
 runUpdate LearningParameters {..} (Deconvolution oldKernel oldMomentum) (Deconvolution' kernelGradient) =
-let (newKernel, newMomentum) = decendMatrix learningRate learningMomentum learningRegulariser oldKernel kernelGradient oldMomentum
+let (newKernel, newMomentum) = descendMatrix learningRate learningMomentum learningRegulariser oldKernel kernelGradient oldMomentum
 in Deconvolution newKernel newMomentum
 createRandom = randomDeconvolution

View File

@@ -39,8 +39,8 @@ instance (KnownNat i, KnownNat o) => UpdateLayer (FullyConnected i o) where
 type Gradient (FullyConnected i o) = (FullyConnected' i o)
 runUpdate LearningParameters {..} (FullyConnected (FullyConnected' oldBias oldActivations) (FullyConnected' oldBiasMomentum oldMomentum)) (FullyConnected' biasGradient activationGradient) =
-let (newBias, newBiasMomentum) = decendVector learningRate learningMomentum learningRegulariser oldBias biasGradient oldBiasMomentum
-(newActivations, newMomentum) = decendMatrix learningRate learningMomentum learningRegulariser oldActivations activationGradient oldMomentum
+let (newBias, newBiasMomentum) = descendVector learningRate learningMomentum learningRegulariser oldBias biasGradient oldBiasMomentum
+(newActivations, newMomentum) = descendMatrix learningRate learningMomentum learningRegulariser oldActivations activationGradient oldMomentum
 in FullyConnected (FullyConnected' newBias newActivations) (FullyConnected' newBiasMomentum newMomentum)
 createRandom = randomFullyConnected

View File

@@ -1,7 +1,7 @@
 {-# LANGUAGE ForeignFunctionInterface #-}
 module Grenade.Layers.Internal.Update (
-decendMatrix
-, decendVector
+descendMatrix
+, descendVector
 ) where
 import Data.Maybe ( fromJust )
@@ -17,8 +17,8 @@ import qualified Numeric.LinearAlgebra.Devel as U
 import System.IO.Unsafe ( unsafePerformIO )
-decendMatrix :: (KnownNat rows, KnownNat columns) => Double -> Double -> Double -> L rows columns -> L rows columns -> L rows columns -> (L rows columns, L rows columns)
-decendMatrix rate momentum regulariser weights gradient lastUpdate =
+descendMatrix :: (KnownNat rows, KnownNat columns) => Double -> Double -> Double -> L rows columns -> L rows columns -> L rows columns -> (L rows columns, L rows columns)
+descendMatrix rate momentum regulariser weights gradient lastUpdate =
 let (rows, cols) = size weights
 len = rows * cols
 -- Most gradients come in in ColumnMajor,
@@ -29,7 +29,7 @@ decendMatrix rate momentum regulariser weights gradient lastUpdate =
 weights' = flatten . tr . extract $ weights
 gradient' = flatten . tr . extract $ gradient
 lastUpdate' = flatten . tr . extract $ lastUpdate
-(vw, vm) = decendUnsafe len rate momentum regulariser weights' gradient' lastUpdate'
+(vw, vm) = descendUnsafe len rate momentum regulariser weights' gradient' lastUpdate'
 -- Note that it's ColumnMajor, as we did a transpose before
 -- using the internal vectors.
@@ -37,17 +37,17 @@ decendMatrix rate momentum regulariser weights gradient lastUpdate =
 mm = U.matrixFromVector U.ColumnMajor rows cols vm
 in (fromJust . create $ mw, fromJust . create $ mm)
-decendVector :: (KnownNat r) => Double -> Double -> Double -> R r -> R r -> R r -> (R r, R r)
-decendVector rate momentum regulariser weights gradient lastUpdate =
+descendVector :: (KnownNat r) => Double -> Double -> Double -> R r -> R r -> R r -> (R r, R r)
+descendVector rate momentum regulariser weights gradient lastUpdate =
 let len = size weights
 weights' = extract weights
 gradient' = extract gradient
 lastUpdate' = extract lastUpdate
-(vw, vm) = decendUnsafe len rate momentum regulariser weights' gradient' lastUpdate'
+(vw, vm) = descendUnsafe len rate momentum regulariser weights' gradient' lastUpdate'
 in (fromJust $ create vw, fromJust $ create vm)
-decendUnsafe :: Int -> Double -> Double -> Double -> Vector Double -> Vector Double -> Vector Double -> (Vector Double, Vector Double)
-decendUnsafe len rate momentum regulariser weights gradient lastUpdate =
+descendUnsafe :: Int -> Double -> Double -> Double -> Vector Double -> Vector Double -> Vector Double -> (Vector Double, Vector Double)
+descendUnsafe len rate momentum regulariser weights gradient lastUpdate =
 unsafePerformIO $ do
 outWPtr <- mallocForeignPtrArray len
 outMPtr <- mallocForeignPtrArray len
@@ -60,11 +60,11 @@ decendUnsafe len rate momentum regulariser weights gradient lastUpdate =
 withForeignPtr lPtr $ \lPtr' ->
 withForeignPtr outWPtr $ \outWPtr' ->
 withForeignPtr outMPtr $ \outMPtr' ->
-decend_cpu len rate momentum regulariser wPtr' gPtr' lPtr' outWPtr' outMPtr'
+descend_cpu len rate momentum regulariser wPtr' gPtr' lPtr' outWPtr' outMPtr'
 return (U.unsafeFromForeignPtr0 outWPtr len, U.unsafeFromForeignPtr0 outMPtr len)
 foreign import ccall unsafe
-decend_cpu
+descend_cpu
 :: Int -> Double -> Double -> Double -> Ptr Double -> Ptr Double -> Ptr Double -> Ptr Double -> Ptr Double -> IO ()
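The renamed functions keep their argument order: rate, momentum and regulariser first, then the weights, the gradient and the previous update. A minimal usage sketch of descendVector, assuming the internal module remains importable and using hmatrix's static vectors (the concrete numbers are made up for illustration):

{-# LANGUAGE DataKinds #-}
import Numeric.LinearAlgebra.Static (R, vector)
import Grenade.Layers.Internal.Update (descendVector)

main :: IO ()
main = do
  let weights    = vector [0.5, -0.2, 0.1]  :: R 3
      gradient   = vector [0.1,  0.0, -0.3] :: R 3
      lastUpdate = vector [0.0,  0.0,  0.0] :: R 3
      -- rate = 0.01, momentum = 0.9, regulariser = 1e-4
      (weights', momentum') = descendVector 0.01 0.9 1e-4 weights gradient lastUpdate
  print weights'
  print momentum'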

View File

@@ -87,11 +87,11 @@ instance (KnownNat i, KnownNat o) => UpdateLayer (LSTM i o) where
 -- Utility function for updating with the momentum, gradients, and weights.
 u :: forall x ix out. (KnownNat ix, KnownNat out) => (x -> (L out ix)) -> x -> x -> x -> ((L out ix), (L out ix))
 u e (e -> weights) (e -> momentum) (e -> gradient) =
-decendMatrix learningRate learningMomentum learningRegulariser weights gradient momentum
+descendMatrix learningRate learningMomentum learningRegulariser weights gradient momentum
 v :: forall x ix. (KnownNat ix) => (x -> (R ix)) -> x -> x -> x -> ((R ix), (R ix))
 v e (e -> weights) (e -> momentum) (e -> gradient) =
-decendVector learningRate learningMomentum learningRegulariser weights gradient momentum
+descendVector learningRate learningMomentum learningRegulariser weights gradient momentum
 -- There's a lot of updates here, so to try and minimise the number of data copies
 -- we'll create a mutable bucket for each.
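The u and v helpers above use view patterns to project the same field out of the weight, momentum and gradient records before descending on it. Written without view patterns, and with the learning parameters passed explicitly rather than captured from the enclosing runUpdate, the matrix variant amounts to this sketch (updateField is a hypothetical name, not part of Grenade):

{-# LANGUAGE ScopedTypeVariables #-}
import GHC.TypeLits (KnownNat)
import Numeric.LinearAlgebra.Static (L)
import Grenade.Layers.Internal.Update (descendMatrix)

-- Project one field (say, a gate's weight matrix) out of each record and take
-- a single descent step on it, returning the new weights and the new momentum.
updateField :: forall x out ix. (KnownNat out, KnownNat ix)
            => Double -> Double -> Double   -- rate, momentum, regulariser
            -> (x -> L out ix)              -- field projection, the `e` above
            -> x -> x -> x                  -- weights, momentum and gradient records
            -> (L out ix, L out ix)
updateField rate mom reg e ws ms gs =
  descendMatrix rate mom reg (e ws) (e gs) (e ms)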