|
|
|
@ -98,7 +98,7 @@ |
|
|
|
|
second, and so on, until @c H<sub>l+1</sub> contains the weights |
|
|
|
|
from layer @c l to the output @c O. |
|
|
|
|
|
|
|
|
|
The output vector is then calculated as: |
|
|
|
|
The output vector is then calculated as: |
|
|
|
|
O = I × H<sub>1</sub> × H<sub>2</sub> × H<sub>…</sub> × H<sub>l+1</sub> |
|
|
|
|
|
|
|
|
|
@code |
|
|
|
@ -106,10 +106,10 @@ |
|
|
|
|
const size_type o(2); |
|
|
|
|
NeuroNet<double, i, o> net; |
|
|
|
|
Matrix<1, i> input(1.0, 2.0, 0.0, -1.0); |
|
|
|
|
Matrix<1, o> output = net.propagate(input); |
|
|
|
|
Matrix<1, o> output = net(input); |
|
|
|
|
@endcode |
|
|
|
|
|
|
|
|
|
@section neuro-backward Back Propagation |
|
|
|
|
@section neuro-backward Back Propagation |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
*/ |
|
|
|
|