path: root/examples/Numeric/ADDual/Examples.hs
author     Tom Smeding <t.j.smeding@uu.nl>  2025-02-20 22:41:53 +0100
committer  Tom Smeding <t.j.smeding@uu.nl>  2025-02-20 22:41:53 +0100
commit     20754e0ae9226f658590f46105399aee5c0dcee2 (patch)
tree       261be51f07c530d09665708fb149dd241c296b70 /examples/Numeric/ADDual/Examples.hs
parent     fe3132304b6c25e5bebc9fb327e3ea5d6018be7a (diff)
Try debugging crash
Diffstat (limited to 'examples/Numeric/ADDual/Examples.hs')
-rw-r--r--  examples/Numeric/ADDual/Examples.hs   65
1 file changed, 65 insertions(+), 0 deletions(-)
diff --git a/examples/Numeric/ADDual/Examples.hs b/examples/Numeric/ADDual/Examples.hs
new file mode 100644
index 0000000..d6aa6d2
--- /dev/null
+++ b/examples/Numeric/ADDual/Examples.hs
@@ -0,0 +1,65 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE DeriveTraversable #-}
+{-# LANGUAGE TypeApplications #-}
+module Numeric.ADDual.Examples where
+
+import Control.DeepSeq
+import Control.Monad (replicateM)
+import qualified Data.Vector as V
+import GHC.Generics (Generic)
+import Hedgehog (Gen)
+import qualified Hedgehog.Gen as Gen
+import qualified Hedgehog.Range as Range
+
+
+type Matrix s = V.Vector s
+
+data FNeural a = FNeural [(Matrix a, V.Vector a)] (V.Vector a)
+  deriving (Show, Functor, Foldable, Traversable, Generic)
+
+instance NFData a => NFData (FNeural a)
+
+fneural :: (Floating a, Ord a) => FNeural a -> a
+fneural (FNeural layers input) =
+  let dotp v1 v2 = V.sum (V.zipWith (*) v1 v2)
+
+      mat @. vec =
+        let n = V.length vec
+            m = V.length mat `div` n
+        in V.fromListN m $ map (\i -> dotp (V.slice (n*i) n mat) vec) [0 .. m-1]
+      (+.) = V.zipWith (+)
+
+      relu x = if x >= 0.0 then x else 0.0
+      safeSoftmax vec = let m = V.maximum vec
+                            factor = V.sum (V.map (\z -> exp (z - m)) vec)
+                        in V.map (\z -> exp (z - m) / factor) vec
+      forward [] x = safeSoftmax x
+      forward ((weights, bias) : lys) x =
+        let x' = V.map relu ((weights @. x) +. bias)
+        in forward lys x'
+  in V.sum $ forward layers input
+
+makeNeuralInput :: FNeural Double
+makeNeuralInput =
+  let genMatrix nin nout =
+        V.fromListN (nin*nout) [sin (fromIntegral @Int (i+j))
+                               | i <- [0..nout-1], j <- [0..nin-1]]
+      genVector nout = V.fromListN nout [sin (0.41 * fromIntegral @Int i) | i <- [0..nout-1]]
+      -- 50 inputs; 2 hidden layers (100; 50); final softmax, then sum the outputs.
+      nIn = 50; n1 = 100; n2 = 50
+  in FNeural [(genMatrix nIn n1, genVector n1)
+             ,(genMatrix n1 n2, genVector n2)]
+             (genVector nIn)
+
+genNeuralInput :: Gen (FNeural Double)
+genNeuralInput = do
+  let genScalar = Gen.double (Range.linearFracFrom 0 (-1) 1)
+      genMatrix nin nout = V.fromListN (nin*nout) <$> replicateM (nin*nout) genScalar
+      genVector nout = V.fromListN nout <$> replicateM nout genScalar
+  nIn <- Gen.integral (Range.linear 1 80)
+  n1 <- Gen.integral (Range.linear 1 100)
+  n2 <- Gen.integral (Range.linear 1 80)
+  m1 <- genMatrix nIn n1; v1 <- genVector n1
+  m2 <- genMatrix n1 n2; v2 <- genVector n2
+  inp <- genVector nIn
+  pure $ FNeural [(m1, v1), (m2, v2)] inp
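
A quick way to chase the crash named in the commit message is to force the
forward pass on the deterministic input, outside of any test harness. The
driver below is a sketch and not part of this commit; the standalone main and
the use of deepseq/evaluate to force the result are assumptions:

-- Hypothetical debugging driver (not part of this commit): fully evaluates
-- the forward pass on the fixed input, so a crash in evaluation itself
-- reproduces without involving Hedgehog.
import Control.DeepSeq (force)
import Control.Exception (evaluate)

import Numeric.ADDual.Examples (fneural, makeNeuralInput)

main :: IO ()
main = do
  result <- evaluate (force (fneural makeNeuralInput))
  print result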
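
Since genNeuralInput is an ordinary Hedgehog Gen, it also slots directly into
a property. A minimal sketch, again not part of this commit, with the property
name prop_forward_finite chosen here purely for illustration:

-- Hypothetical property: safeSoftmax yields a probability vector, so the
-- summed forward pass should be a finite number (close to 1) for any
-- generated network shape.
import Hedgehog (Property, property, forAll, assert)

import Numeric.ADDual.Examples (fneural, genNeuralInput)

prop_forward_finite :: Property
prop_forward_finite = property $ do
  inp <- forAll genNeuralInput
  let y = fneural inp
  assert (not (isNaN y) && not (isInfinite y))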