/*! @file

    @id $Id$ */
//       1         2         3         4         5         6         7         8
// 45678901234567890123456789012345678901234567890123456789012345678901234567890

#include <cstddef>  // std::size_t for the dimension template parameters

/** @mainpage Neural Network with Hidden Layers

    @section neuro-intro Overview

    A complex neural network can be imitated as a vector @c I of @c i input
    values, a vector @c O of @c o output values and any number @c l of hidden
    layers, where each of them contains @c h neurons.

    A neural network with double precision is initialized as:

    @code
    NeuroNet<double, i, o, l, h> net;
    @endcode

    @dot
    digraph g {
      rankdir=LR;
      ranksep=1.5;
      subgraph clusterInput {
        label="Input Layer";
        I1 [label=<I<sub>1</sub>>];
        I2 [label=<I<sub>2</sub>>];
        Ix [label=<I<sub>…</sub>>];
        Ii [label=<I<sub>i</sub>>];
      }
      subgraph clusterHidden1 {
        label="First Hidden Layer";
        H11 [label=<H<sub>11</sub>>];
        H12 [label=<H<sub>12</sub>>];
        H1x [label=<H<sub>1…</sub>>];
        H1h [label=<H<sub>1h</sub>>];
      }
      subgraph clusterHidden2 {
        label="Second Hidden Layer";
        H21 [label=<H<sub>21</sub>>];
        H22 [label=<H<sub>22</sub>>];
        H2x [label=<H<sub>2…</sub>>];
        H2h [label=<H<sub>2h</sub>>];
      }
      subgraph clusterHiddenx {
        label="More Hidden Layers";
        Hx1 [label=<H<sub>…1</sub>>];
        Hx2 [label=<H<sub>…2</sub>>];
        Hxx [label=<H<sub>……</sub>>];
        Hxh [label=<H<sub>…h</sub>>];
      }
      subgraph clusterHiddenl {
        label="Last Hidden Layer";
        Hl1 [label=<H<sub>l1</sub>>];
        Hl2 [label=<H<sub>l2</sub>>];
        Hlx [label=<H<sub>l…</sub>>];
        Hlh [label=<H<sub>lh</sub>>];
      }
      subgraph clusterOutput {
        label="Output Layer";
        O1 [label=<O<sub>1</sub>>];
        O2 [label=<O<sub>2</sub>>];
        Ox [label=<O<sub>…</sub>>];
        Oo [label=<O<sub>o</sub>>];
      }
      I1  -> { H11; H12; H1x; H1h; }
      I2  -> { H11; H12; H1x; H1h; }
      Ix  -> { H11; H12; H1x; H1h; }
      Ii  -> { H11; H12; H1x; H1h; }
      H11 -> { H21; H22; H2x; H2h; }
      H12 -> { H21; H22; H2x; H2h; }
      H1x -> { H21; H22; H2x; H2h; }
      H1h -> { H21; H22; H2x; H2h; }
      H21 -> { Hx1; Hx2; Hxx; Hxh; }
      H22 -> { Hx1; Hx2; Hxx; Hxh; }
      H2x -> { Hx1; Hx2; Hxx; Hxh; }
      H2h -> { Hx1; Hx2; Hxx; Hxh; }
      Hx1 -> { Hl1; Hl2; Hlx; Hlh; }
      Hx2 -> { Hl1; Hl2; Hlx; Hlh; }
      Hxx -> { Hl1; Hl2; Hlx; Hlh; }
      Hxh -> { Hl1; Hl2; Hlx; Hlh; }
      Hl1 -> { O1; O2; Ox; Oo; }
      Hl2 -> { O1; O2; Ox; Oo; }
      Hlx -> { O1; O2; Ox; Oo; }
      Hlh -> { O1; O2; Ox; Oo; }
    }
    @enddot

    @section neuro-forward Forward Propagation

    The connections between two layers can be modelled as a matrix.
    Then matrix @c H1 contains the weights from @c I to the first hidden
    layer, @c H2 from the first to the second, and so on, until @c Hl+1
    contains the weights from layer @c l to the output @c O. The output
    vector is then calculated as:

    @f[ O = I \times H_1 \times H_2 \times \dots \times H_{l+1} @f]

    @code
    const size_type i(4);
    const size_type o(2);
    NeuroNet<double, i, o> net;
    Matrix<1, i> input(1.0, 2.0, 0.0, -1.0);
    Matrix<1, o> output = net(input);
    @endcode

    @section neuro-backward Back Propagation */

/** Feed-forward neural network with fully connected hidden layers.

    The compile-time parameters fix the topology of the net as described
    on the @ref neuro-intro "main page":

    @tparam TYPE    numeric type of the values and weights (e.g. @c double)
    @tparam INPUT   number of input values (@c i)
    @tparam OUTPUT  number of output values (@c o)
    @tparam LAYERS  number of hidden layers (@c l)
    @tparam NEURONS number of neurons per hidden layer (@c h); defaults to
                    the number of inputs

    @note The body is intentionally still empty — this file so far only
          declares the interface and documents the design; weights, the
          forward-propagation @c operator() and back propagation are yet
          to be implemented. */
template<typename TYPE = double,
         std::size_t INPUT = 1,
         std::size_t OUTPUT = 1,
         std::size_t LAYERS = 1,
         std::size_t NEURONS = INPUT>
class NeuroNet {
};