C++ Library for Neural Networks — Use libneuron to design neural networks with back propagation and evolution.
/*! @file

    @id $Id$
*/
//       1         2         3         4         5         6         7         8
// 45678901234567890123456789012345678901234567890123456789012345678901234567890

#include <matrix.hxx>

/** @mainpage Neural Network with Hidden Layers

    @section neuro-intro Overview

    A complex neural network can be modelled as a vector @c I of @c i
    input values, a vector @c O of @c o output values and any number
    @c l of hidden layers, where each of them contains @c h neurons.

    A neural network with double precision is initialized as:
    @code
    NeuroNet<double, i, o, l+1, h> net;
    @endcode
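
    For example, following the @c l+1 convention above, a network
    with 4 inputs, 2 outputs and @c l=3 hidden layers of 6 neurons
    each would be declared as:
    @code
    NeuroNet<double, 4, 2, 4, 6> net; // i=4, o=2, l=3 (so l+1=4), h=6
    @endcode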
    @dot
    digraph g {
      rankdir=LR;
      ranksep=1.5;
      subgraph clusterInput {
        label="Input Layer";
        I1 [label=<I<SUB>1</SUB>>];
        I2 [label=<I<SUB>2</SUB>>];
        Ix [label=<I<SUB>…</SUB>>];
        Ii [label=<I<SUB>i</SUB>>];
      }
      subgraph clusterHidden1 {
        label="First Hidden Layer";
        H11 [label=<H<SUB>11</SUB>>];
        H12 [label=<H<SUB>12</SUB>>];
        H1x [label=<H<SUB>1…</SUB>>];
        H1h [label=<H<SUB>1h</SUB>>];
      }
      subgraph clusterHidden2 {
        label="Second Hidden Layer";
        H21 [label=<H<SUB>21</SUB>>];
        H22 [label=<H<SUB>22</SUB>>];
        H2x [label=<H<SUB>2…</SUB>>];
        H2h [label=<H<SUB>2h</SUB>>];
      }
      subgraph clusterHiddenx {
        label="More Hidden Layers";
        Hx1 [label=<H<SUB>…1</SUB>>];
        Hx2 [label=<H<SUB>…2</SUB>>];
        Hxx [label=<H<SUB>……</SUB>>];
        Hxh [label=<H<SUB>…h</SUB>>];
      }
      subgraph clusterHiddenl {
        label="Last Hidden Layer";
        Hl1 [label=<H<SUB>l1</SUB>>];
        Hl2 [label=<H<SUB>l2</SUB>>];
        Hlx [label=<H<SUB>l…</SUB>>];
        Hlh [label=<H<SUB>lh</SUB>>];
      }
      subgraph clusterOutput {
        label="Output Layer";
        O1 [label=<O<SUB>1</SUB>>];
        O2 [label=<O<SUB>2</SUB>>];
        Ox [label=<O<SUB>…</SUB>>];
        Oo [label=<O<SUB>o</SUB>>];
      }
      I1 -> { H11; H12; H1x; H1h; }
      I2 -> { H11; H12; H1x; H1h; }
      Ix -> { H11; H12; H1x; H1h; }
      Ii -> { H11; H12; H1x; H1h; }
      H11 -> { H21; H22; H2x; H2h; }
      H12 -> { H21; H22; H2x; H2h; }
      H1x -> { H21; H22; H2x; H2h; }
      H1h -> { H21; H22; H2x; H2h; }
      H21 -> { Hx1; Hx2; Hxx; Hxh; }
      H22 -> { Hx1; Hx2; Hxx; Hxh; }
      H2x -> { Hx1; Hx2; Hxx; Hxh; }
      H2h -> { Hx1; Hx2; Hxx; Hxh; }
      Hx1 -> { Hl1; Hl2; Hlx; Hlh; }
      Hx2 -> { Hl1; Hl2; Hlx; Hlh; }
      Hxx -> { Hl1; Hl2; Hlx; Hlh; }
      Hxh -> { Hl1; Hl2; Hlx; Hlh; }
      Hl1 -> { O1; O2; Ox; Oo; }
      Hl2 -> { O1; O2; Ox; Oo; }
      Hlx -> { O1; O2; Ox; Oo; }
      Hlh -> { O1; O2; Ox; Oo; }
    }
    @enddot

    @section neuro-forward Forward Propagation

    The connections between two consecutive layers can be modelled as
    a Matrix: Matrix H<sub>1</sub> contains the weights from @c I to
    the first hidden layer, H<sub>2</sub> those from the first to the
    second, and so on, until H<sub>l+1</sub> contains the weights
    from layer @c l to the output @c O.

    The output vector is then calculated as:
    O = I × H<sub>1</sub> × H<sub>2</sub> × H<sub>…</sub> × H<sub>l+1</sub>

    @code
    const size_t i(4);
    const size_t o(2);
    NeuroNet<double, i, o> net;
    Matrix<1, i> input(1.0, 2.0, 0.0, -1.0);
    Matrix<1, o> output = net(input);
    @endcode
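
    To illustrate the product chain, here is a minimal sketch in
    plain C++, using @c std::vector instead of the library's Matrix
    class; the helper names @c apply and @c forward are hypothetical
    and not part of libneuron:
    @code
    #include <vector>
    #include <cstddef>

    using Vec = std::vector<double>;
    using Mat = std::vector<Vec>; // weight matrix, indexed [row][column]

    // multiply a 1 x n row vector by an n x m weight matrix
    Vec apply(const Vec& in, const Mat& w) {
      Vec out(w[0].size(), 0.0);
      for (std::size_t c(0); c < out.size(); ++c)
        for (std::size_t r(0); r < in.size(); ++r)
          out[c] += in[r] * w[r][c];
      return out;
    }

    // O = I x H1 x H2 x ... x H(l+1)
    Vec forward(Vec v, const std::vector<Mat>& weights) {
      for (const Mat& w: weights) v = apply(v, w);
      return v;
    }
    @endcode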
    @section neuro-backward Back Propagation
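
    Training can be sketched as standard gradient descent (an
    assumption for illustration; the exact update rule used by
    libneuron is not spelled out here): for a target vector @c T and
    the squared error E = ½‖O−T‖², every weight matrix
    H<sub>k</sub> is moved against its error gradient,
    H<sub>k</sub> ← H<sub>k</sub> − η·∂E/∂H<sub>k</sub>, where η is
    the learning rate. For a single weight matrix this reduces to the
    delta rule; the sketch below reuses the hypothetical @c Vec,
    @c Mat and @c apply helpers from the forward-propagation sketch
    above:
    @code
    // One gradient-descent step on a single weight matrix w.
    // For E = 0.5 * |out - t|^2, dE/dw[r][c] = in[r] * (out[c] - t[c]).
    void train_step(const Vec& in, Mat& w, const Vec& t, double eta) {
      Vec out(apply(in, w)); // forward pass
      for (std::size_t r(0); r < w.size(); ++r)
        for (std::size_t c(0); c < w[r].size(); ++c)
          w[r][c] -= eta * in[r] * (out[c] - t[c]);
    }
    @endcode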
*/
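/** A neural network with hidden layers. Parameter descriptions are
    inferred from the overview above, where the network is
    instantiated as NeuroNet<double, i, o, l+1, h>.
    @tparam TYPE              numeric type of the weights, e.g. @c double
    @tparam INPUT_LAYERS      number of input values (@c i above)
    @tparam OUTPUT_LAYERS     number of output values (@c o above)
    @tparam HIDDEN_LAYERS     hidden layer parameter; the overview passes
                              @c l+1 for @c l hidden layers
    @tparam HIDDEN_LAYER_SIZE neurons per hidden layer (@c h above)
*/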
template
  <typename TYPE,
   size_t INPUT_LAYERS,
   size_t OUTPUT_LAYERS,
   size_t HIDDEN_LAYERS = INPUT_LAYERS+OUTPUT_LAYERS,
   size_t HIDDEN_LAYER_SIZE = INPUT_LAYERS+OUTPUT_LAYERS>
class NeuroNet {
};
|
|