| author | Tom Smeding <tom@tomsmeding.com> | 2025-10-23 23:43:54 +0200 |
|---|---|---|
| committer | Tom Smeding <tom@tomsmeding.com> | 2025-10-23 23:43:54 +0200 |
| commit | 1adad63b14aa8d13295fb0d68fbdc8ab988775b2 | |
| tree | 3310a49b33e6996a8470d6ec053ecced0bc0dae3 | |
| parent | c50aecbb209bb99da074b6c6911b672d0f1d1388 | |
Add uniform-free test and benchmark
We need an optimisation that detects replicate-like behaviour and turns
it into actual replicates, which should then be fused away. The problem
is exhibited by the new function exUniformFree.
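To make the duplication concrete, here is a small stand-alone Haskell model of the tape behaviour described above. All names (`TapeEntry`, `naiveTape`, `SharedTape`) are hypothetical illustrations, not code from this repository.

```haskell
-- A toy model of the tape blowup, assuming the story sketched in the
-- commit message: the dual of a 'build' keeps one tape entry per
-- iteration, and a body that uses free variables non-linearly (here
-- y * z) needs their primal values in every entry.
module TapeSketch where

-- Hypothetical per-iteration tape entry for a body like  y * z + i:
-- differentiating the product needs the primals of both y and z.
data TapeEntry = TapeEntry { tapeY :: Double, tapeZ :: Double }
  deriving Show

-- What the compositional transformation currently produces: n identical
-- (y, z) pairs, one per invocation of the build body.
naiveTape :: Int -> Double -> Double -> [TapeEntry]
naiveTape n y z = [TapeEntry y z | _ <- [1 .. n]]

-- What replicate detection should produce instead: a single shared
-- (y, z) pair, conceptually a replicate that fusion then eliminates.
newtype SharedTape = SharedTape TapeEntry
  deriving Show

sharedTape :: Double -> Double -> SharedTape
sharedTape y z = SharedTape (TapeEntry y z)
```

The naive tape is O(n) in the uniform part, while the shared version is O(1); the planned replicate-detection pass plus fusion should recover the latter.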
| -rw-r--r-- | bench/Main.hs | 33 |
|---|---|---|
| -rw-r--r-- | src/Example.hs | 11 |
| -rw-r--r-- | test/Main.hs | 2 |

3 files changed, 31 insertions(+), 15 deletions(-)
```diff
diff --git a/bench/Main.hs b/bench/Main.hs
index 34e8bae..ec9264b 100644
--- a/bench/Main.hs
+++ b/bench/Main.hs
@@ -1,11 +1,13 @@
 {-# LANGUAGE DataKinds #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE NumericUnderscores #-}
+{-# LANGUAGE ScopedTypeVariables #-}
 {-# LANGUAGE StandaloneKindSignatures #-}
-{-# LANGUAGE TypeOperators #-}
+{-# LANGUAGE TypeApplications #-}
 {-# LANGUAGE TypeFamilies #-}
+{-# LANGUAGE TypeOperators #-}
 {-# LANGUAGE UndecidableInstances #-}
-{-# LANGUAGE TypeApplications #-}
-{-# LANGUAGE ScopedTypeVariables #-}
-{-# LANGUAGE FlexibleInstances #-}
 {-# OPTIONS -Wno-orphans #-}
 
 module Main where
@@ -98,18 +100,19 @@ makeGMMInputs =
 
 accumConfig :: CHADConfig
 accumConfig = chcSetAccum CHAD.defaultConfig
 
-main :: IO ()
-main = defaultMain
-  [env (return makeNeuralInputs) $ \inputs -> bgroup "neural"
-    [env (gradCHAD CHAD.defaultConfig neural) $ \fun ->
-       bench "default" (nfAppIO fun inputs)
-    ,env (gradCHAD accumConfig neural) $ \fun ->
-       bench "accum" (nfAppIO fun inputs)
-    ]
-  ,env (return makeGMMInputs) $ \inputs -> bgroup "gmm"
-    [env (gradCHAD CHAD.defaultConfig (gmmObjective False)) $ \fun ->
+bgroupDefaultAccum :: (KnownEnv env, NFData (Rep (Tup (D2E env)))) => String -> Ex env R -> SList Value env -> Benchmark
+bgroupDefaultAccum name term inputs =
+  bgroup name
+    [env (gradCHAD CHAD.defaultConfig term) $ \fun ->
        bench "default" (nfAppIO fun inputs)
-    ,env (gradCHAD accumConfig (gmmObjective False)) $ \fun ->
+    ,env (gradCHAD accumConfig term) $ \fun ->
        bench "accum" (nfAppIO fun inputs)
     ]
+
+main :: IO ()
+main = defaultMain
+  [env (return makeNeuralInputs) $ bgroupDefaultAccum "neural" neural
+  ,env (return makeGMMInputs) $ bgroupDefaultAccum "gmm" (gmmObjective False)
+  ,bgroupDefaultAccum "uniform-free" exUniformFree (Value 42.0 `SCons` Value 1000_000 `SCons` SNil)
+  ]
diff --git a/src/Example.hs b/src/Example.hs
index 1784a48..bccc8de 100644
--- a/src/Example.hs
+++ b/src/Example.hs
@@ -167,3 +167,14 @@ neuralGo =
       (Value dinput_2 `SCons` Value dlay3_2 `SCons` Value dlay2_2 `SCons` Value dlay1_2 `SCons` SNil) = drevByFwdInterp knownEnv neural argument 1.0
   in trace (ppExpr knownEnv revderiv) $
      (primal, (dlay1_1, dlay2_1, dlay3_1, dinput_1), (dlay1_2, dlay2_2, dlay3_2, dinput_2))
+
+-- The build body uses free variables in a non-linear way, so their primal
+-- values are required in the dual of the build. Thus, compositionally, they
+-- are stored in the tape from each individual lambda invocation. This results
+-- in n copies of y and z, where only one copy would have sufficed.
+exUniformFree :: Ex '[R, I64] R
+exUniformFree = fromNamed $ lambda #n $ lambda #x $ body $
+  let_ #y (#x * 2) $
+  let_ #z (#x * 3) $
+  idx0 $ sum1i $
+    build1 #n $ #i :-> #y * #z + toFloat_ #i
diff --git a/test/Main.hs b/test/Main.hs
index 0c7ec76..9c992ec 100644
--- a/test/Main.hs
+++ b/test/Main.hs
@@ -667,6 +667,8 @@ tests_AD = testGroup "AD"
   ,adTestGen "gmm-wrong" (Example.gmmObjective True)
      gen_gmm
   ,adTestGen "gmm" (Example.gmmObjective False) gen_gmm
+
+  ,adTestTp "uniform-free" (C "" 0 :& ()) Example.exUniformFree
   ]
 
 main :: IO ()
```
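For sanity-checking the new test and benchmark, the expected output of `exUniformFree` can be derived by hand. The reference function below is a sketch, not part of this commit; the name `exUniformFreeRef` is hypothetical.

```haskell
-- Hand-derived reference for exUniformFree: with y = 2x and z = 3x the
-- program computes
--   f n x = sum_{i=0}^{n-1} (y*z + fromIntegral i) = 6*n*x^2 + n*(n-1)/2
-- so the gradient with respect to x is  df/dx = 12*n*x.
exUniformFreeRef :: Int -> Double -> (Double, Double)  -- (primal, dx)
exUniformFreeRef n x =
  ( 6 * n' * x * x + n' * (n' - 1) / 2
  , 12 * n' * x )
  where n' = fromIntegral n
```

Note that the uniform part of the tape, the primals of y and z, is the same pair of doubles in all n iterations, which is exactly the replicate-like behaviour the commit message asks an optimisation to detect.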
