@@ -167,6 +167,7 @@
 @page biblio Bibliography
 
 - <a href="http://briandolhansky.com/blog/2014/10/30/artificial-neural-networks-matrix-form-part-5">Artificial Neural Networks: Matrix Form (Part 5)</a>
+- <a href="http://briandolhansky.com/blog/2013/9/27/artificial-neural-networks-backpropagation-part-4">Artificial Neural Networks: Mathematics of Backpropagation (Part 4)</a>
 - <a href="http://www.tornau.name/wp-content/uploads/2009/04/studiumsmaterialien/neuronale_netze_zusammefassung.pdf">Vorlesung Neuronale Netze - Zusammenfassung - Christoph Tornau</a>
 - <a href="http://www.neuronalesnetz.de/">Neuronale Netze — Eine Einführung</a>
 - <a href="http://alphard.ethz.ch/hafner/Vorles/Optim/ANN/Artificial%20Neural%20Network%20based%20Curve%20Prediction%20Documentation.pdf">Artificial Neural Network based Curve Prediction</a>
@@ -179,11 +180,11 @@
 namespace math {
   // hyperbolic tangent as the standard activation function
   template<typename TYPE> TYPE tanh(const TYPE& v) {
-    return ::tanh((double)v);
+    return ::tanh((long double)v);
   }
   // derivative of the activation function, needed for backpropagation
   template<typename TYPE> TYPE tanh_diff(const TYPE& v) {
-    TYPE ch(::cosh((double)v));
+    TYPE ch(::cosh((long double)v));
     return 1/(ch*ch);
   }
 }
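
A note on the `tanh_diff` context above, since the hunk shows only the code: the function returns the exact derivative of the activation, via the standard identity

\[
\frac{d}{dx}\tanh(x) = \frac{1}{\cosh^2(x)} = 1 - \tanh^2(x),
\]

so `1/(ch*ch)` is the closed-form derivative that backpropagation multiplies into the error term, not an approximation.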
@@ -200,13 +201,19 @@ class NeuroNet {
 public:
   NeuroNet() {
   }
-  Matrix<TYPE, 1, OUTPUT_LAYERS> feed(Matrix<TYPE, 1, INPUT_LAYERS> in) {
+  Matrix<TYPE, 1, OUTPUT_LAYERS> operator()(const Matrix<TYPE, 1, INPUT_LAYERS>& in) {
     Matrix<TYPE, 1, HIDDEN_LAYER_SIZE> l((in*_wi).apply(ACTIVATION));
     for (int i(0); i<HIDDEN_LAYERS-1; ++i)
       l = (l*_wh[i]).apply(ACTIVATION);
     Matrix<TYPE, 1, OUTPUT_LAYERS> out((l*_wo).apply(ACTIVATION));
     return out;
   }
+  Matrix<TYPE, 1, OUTPUT_LAYERS> learn(const Matrix<TYPE, 1, INPUT_LAYERS>& in,
+                                       const Matrix<TYPE, 1, OUTPUT_LAYERS>& expect) {
+    Matrix<TYPE, 1, OUTPUT_LAYERS> out((*this)(in));
+    Matrix<TYPE, 1, OUTPUT_LAYERS> diff(expect-out);
+    return diff;
+  }
 private:
   Matrix<TYPE, INPUT_LAYERS, HIDDEN_LAYER_SIZE> _wi;
   Matrix<TYPE, HIDDEN_LAYER_SIZE, HIDDEN_LAYER_SIZE> _wh[HIDDEN_LAYERS-1];
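
The renamed call operator implements the matrix-form forward pass \( l_1 = \sigma(x\,W_i) \), \( l_{k+1} = \sigma(l_k\,W_h^{(k)}) \), \( y = \sigma(l\,W_o) \), and the new `learn` so far only computes the output-layer error `expect - out`; the weight update is not part of this hunk. A minimal usage sketch follows. The full `NeuroNet` template parameter list is not visible in this diff, so the header name, the parameter order, and the concrete sizes below are assumptions for illustration only:

    // Sketch only: header name, template parameter order and all sizes are
    // assumptions; the diff does not show the full NeuroNet declaration.
    #include "neuronet.h"

    void sketch() {
      // assumed order: TYPE, INPUT_LAYERS, HIDDEN_LAYER_SIZE, HIDDEN_LAYERS,
      // OUTPUT_LAYERS, ACTIVATION -- here: 2 inputs, 2 hidden layers of
      // width 4, 1 output, math::tanh as the activation
      NeuroNet<float, 2, 4, 2, 1, math::tanh<float>> net;

      Matrix<float, 1, 2> in;      // one sample as a row vector
      Matrix<float, 1, 1> expect;  // its desired output

      Matrix<float, 1, 1> out(net(in));               // forward pass via the new operator()
      Matrix<float, 1, 1> err(net.learn(in, expect)); // currently returns expect - out
    }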