|
|
|
|
|
|
|
// 45678901234567890123456789012345678901234567890123456789012345678901234567890
|
|
|
|
|
|
|
|
|
|
#include <matrix.hxx> |
|
|
|
|
#include <cmath> |
|
|
|
|
|
|
|
|
|
/** @mainpage Neural Network with Hidden Layers
|
|
|
|
|
|
|
|
|
|
|
|
|
@c l of hidden layers, each of which contains @c h
|
|
|
|
neurons. |
|
|
|
|
|
|
|
|
|
|
|
|
|
A neural network with double precision is initialized as: |
|
|
|
|
@code |
|
|
|
|
|
|
|
|
NeuroNet<double, i, o, l, h> net; |
|
|
|
|
@endcode |
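If the last two template arguments are omitted, they default to
@c i+o (see the default template arguments of NeuroNet below), so
the shortest form is:

@code
NeuroNet<double, i, o> net;
@endcode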
|
|
|
|
|
|
|
|
|
@dot |
|
|
|
|
|
|
|
second, and so on, until @c H<sub>l+1</sub> contains the weights |
|
|
|
|
from layer @c l to the output @c O. |
|
|
|
|
|
|
|
|
|
The output vector is then calculated as: |
|
|
|
|
<pre>
O = I × H<sub>1</sub> × H<sub>2</sub> × H<sub>…</sub> × H<sub>l+1</sub>
</pre>
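For the product to be defined, each weight matrix must have as many
rows as its predecessor has columns. A minimal sketch of this
dimension bookkeeping, assuming the @c Matrix<TYPE, ROWS, COLS>
template from matrix.hxx (the concrete sizes are only illustrative):

@code
Matrix<double, 1, 4> I;             // input row vector, i = 4
Matrix<double, 4, 6> H1;            // input to first hidden layer, h = 6
Matrix<double, 6, 6> H2;            // hidden layer to hidden layer
Matrix<double, 6, 2> H3;            // last hidden layer to output, o = 2
Matrix<double, 1, 2> O(I*H1*H2*H3); // chained product yields the output
@endcode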
|
|
|
|
There is also an activation function @c f. For back propagation,
|
|
|
|
this function needs its first derivative @c f'.
|
|
|
|
|
|
|
|
|
To get the activation of the first hidden layer, the input vector |
|
|
|
|
is multiplied by the weight matrix of the first hidden layer;
|
|
|
|
this results in an output vector. Then the activation function is |
|
|
|
|
applied to all values of the output vector: |
|
|
|
|
|
|
|
|
|
<pre> |
|
|
|
|
V<sub>1</sub> = f(I×H<sub>1</sub>) |
|
|
|
|
</pre> |
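In code this is one matrix product followed by an element-wise
@c apply, the same pattern NeuroNet::feed uses below; a sketch
assuming the @c Matrix::apply member and the value constructor shown
in the example further down:

@code
Matrix<double, 1, 4> input(1.0, 2.0, 0.0, -1.0);
Matrix<double, 4, 6> h1; // weight matrix of the first hidden layer
Matrix<double, 1, 6> v1((input*h1).apply(math::tanh<double>));
@endcode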
|
|
|
|
|
|
|
|
|
This is done for all layers, up to the output. The output vector |
|
|
|
|
is then calculated as: |
|
|
|
|
|
|
|
|
|
<pre> |
|
|
|
|
O = f(f(f(f(I×H<sub>1</sub>)×H<sub>2</sub>)×H<sub>…</sub>)×H<sub>l+1</sub>) |
|
|
|
|
</pre> |
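Written as a loop over the hidden layers, this nesting is exactly
what NeuroNet::feed implements below; a condensed sketch (sizes and
variable names are only illustrative):

@code
const size_t i(4), o(2), l(3), h(6);
Matrix<double, 1, i> input(1.0, 2.0, 0.0, -1.0);
Matrix<double, i, h> wi;      // input to first hidden layer
Matrix<double, h, h> wh[l-1]; // between the hidden layers
Matrix<double, h, o> wo;      // last hidden layer to output
Matrix<double, 1, h> v((input*wi).apply(math::tanh<double>));
for (size_t n(0); n<l-1; ++n)
  v = (v*wh[n]).apply(math::tanh<double>);
Matrix<double, 1, o> out((v*wo).apply(math::tanh<double>));
@endcode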
|
|
|
|
|
|
|
|
|
@code |
|
|
|
|
const size_t i(4);
|
|
|
|
const size_t o(2);
|
|
|
|
NeuroNet<double, i, o> net; |
|
|
|
|
Matrix<double, 1, i> input(1.0, 2.0, 0.0, -1.0);
|
|
|
|
Matrix<double, 1, o> output = net.feed(input);
|
|
|
|
@endcode |
|
|
|
|
|
|
|
|
|
@section neuro-backward Back Propagation |
|
|
|
|
|
|
|
|
|
@page biblio Bibliography |
|
|
|
|
|
|
|
|
|
- <a href="http://briandolhansky.com/blog/2014/10/30/artificial-neural-networks-matrix-form-part-5">Artificial Neural Networks: Matrix Form (Part 5)</a> |
|
|
|
|
- <a href="http://www.tornau.name/wp-content/uploads/2009/04/studiumsmaterialien/neuronale_netze_zusammefassung.pdf">Vorlesung Neuronale Netze - Zusammenfassung - Christoph Tornau</a> |
|
|
|
|
- <a href="http://www.neuronalesnetz.de/">Neuronale Netze — Eine Einführung</a> |
|
|
|
|
- <a href="http://alphard.ethz.ch/hafner/Vorles/Optim/ANN/Artificial%20Neural%20Network%20based%20Curve%20Prediction%20Documentation.pdf">Artificial Neural Network based Curve Prediction</a> |
|
|
|
|
- <a href="http://cs231n.github.io/convolutional-networks/">Convolutional Neural Networks (CNNs / ConvNets)</a> |
|
|
|
|
- <a href="https://www.tensorflow.org/versions/r0.9/tutorials/index.html">TensorFlow utorials</a> |
|
|
|
|
- <a href="https://www.tensorflow.org/versions/r0.9/tutorials/index.html">TensorFlow Tutorials</a> |
|
|
|
|
- <a href="http://alphard.ethz.ch/hafner/Vorles/Optim/ANN/Artificial%20Neural%20Network%20based%20Curve%20Prediction%20Documentation.pdf">Artificial Neural Network based Curve Prediction</a> |
|
|
|
|
|
|
|
|
|
*/ |
|
|
|
|
|
|
|
|
|
namespace math { |
|
|
|
|
// hyperbolic tangent as the standard activation function
|
|
|
|
|
template<typename TYPE> TYPE tanh(const TYPE& v) { |
|
|
|
|
return std::tanh(static_cast<double>(v));
|
|
|
|
} |
|
|
|
|
// derivative of the activation function for back propagation: tanh'(v) = 1/cosh^2(v)
|
|
|
|
|
template<typename TYPE> TYPE tanh_diff(const TYPE& v) { |
|
|
|
|
TYPE ch(std::cosh(static_cast<double>(v)));
|
|
|
|
return 1/(ch*ch); |
|
|
|
|
} |
|
|
|
|
} |
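/** A fully connected feed forward network with @c HIDDEN_LAYERS
    hidden layers of @c HIDDEN_LAYER_SIZE neurons each; both default
    to INPUT_LAYERS+OUTPUT_LAYERS. ACTIVATION and ACTIVATION_DIFF
    allow plugging in another activation function and its
    derivative; a sketch (the identity function is only
    illustrative, not part of this library):
    @code
    double identity(const double& v) { return v; }
    double identity_diff(const double&) { return 1.0; }
    NeuroNet<double, 4, 2, 3, 8, identity, identity_diff> net;
    @endcode */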
|
|
|
|
|
|
|
|
|
template |
|
|
|
|
<typename TYPE, |
|
|
|
|
size_t INPUT_LAYERS, |
|
|
|
|
size_t OUTPUT_LAYERS, |
|
|
|
|
size_t HIDDEN_LAYERS = INPUT_LAYERS+OUTPUT_LAYERS, |
|
|
|
|
|
|
|
|
size_t HIDDEN_LAYER_SIZE = INPUT_LAYERS+OUTPUT_LAYERS, |
|
|
|
|
TYPE(*ACTIVATION)(const TYPE&) = math::tanh<TYPE>, |
|
|
|
|
TYPE(*ACTIVATION_DIFF)(const TYPE&) = math::tanh_diff<TYPE>> |
|
|
|
|
class NeuroNet { |
|
|
|
|
public: |
|
|
|
|
NeuroNet() { |
|
|
|
|
} |
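/// forward propagation: feed an input row vector through all
/// layers and return the resulting output row vector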
|
|
|
|
Matrix<TYPE, 1, OUTPUT_LAYERS> feed(const Matrix<TYPE, 1, INPUT_LAYERS>& in) {
|
|
|
|
Matrix<TYPE, 1, HIDDEN_LAYER_SIZE> l((in*_wi).apply(ACTIVATION)); |
|
|
|
|
for (size_t i(0); i<HIDDEN_LAYERS-1; ++i)
|
|
|
|
l = (l*_wh[i]).apply(ACTIVATION); |
|
|
|
|
Matrix<TYPE, 1, OUTPUT_LAYERS> out((l*_wo).apply(ACTIVATION)); |
|
|
|
|
return out; |
|
|
|
|
} |
|
|
|
|
private: |
|
|
|
|
Matrix<TYPE, INPUT_LAYERS, HIDDEN_LAYER_SIZE> _wi; |
|
|
|
|
Matrix<TYPE, HIDDEN_LAYER_SIZE, HIDDEN_LAYER_SIZE> _wh[HIDDEN_LAYERS-1]; |
|
|
|
|
Matrix<TYPE, HIDDEN_LAYER_SIZE, OUTPUT_LAYERS> _wo; |
|
|
|
|
}; |
|
|
|
|