learn - not yet implemented, test does not yet work

Branch: master
Author: Marc Wäckerlin, 8 years ago
Parent: fcfe5871f1
Commit: fc88d7b8fa

Changed files:
  1. src/neuron.hxx  (13 lines changed)
  2. test/neuron.cxx (8 lines changed)

src/neuron.hxx

@@ -167,6 +167,7 @@
 @page biblio Bibliography
 - <a href="http://briandolhansky.com/blog/2014/10/30/artificial-neural-networks-matrix-form-part-5">Artificial Neural Networks: Matrix Form (Part 5)</a>
+- <a href="http://briandolhansky.com/blog/2013/9/27/artificial-neural-networks-backpropagation-part-4">Artificial Neural Networks: Mathematics of Backpropagation (Part 4)</a>
 - <a href="http://www.tornau.name/wp-content/uploads/2009/04/studiumsmaterialien/neuronale_netze_zusammefassung.pdf">Vorlesung Neuronale Netze - Zusammenfassung - Christoph Tornau</a>
 - <a href="http://www.neuronalesnetz.de/">Neuronale Netze Eine Einführung</a>
 - <a href="http://alphard.ethz.ch/hafner/Vorles/Optim/ANN/Artificial%20Neural%20Network%20based%20Curve%20Prediction%20Documentation.pdf">Artificial Neural Network based Curve Prediction</a>
@@ -179,11 +180,11 @@
 namespace math {
   // tangens hyperbolicus as standard activation function
   template<typename TYPE> TYPE tanh(const TYPE& v) {
-    return ::tanh((double)v);
+    return ::tanh((long double)v);
   }
   // derivate of activation function for back propagation
   template<typename TYPE> TYPE tanh_diff(const TYPE& v) {
-    TYPE ch(::cosh((double)v));
+    TYPE ch(::cosh((long double)v));
     return 1/(ch*ch);
   }
 }
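
A note on the hunk above: tanh_diff returns 1/(ch*ch), which is indeed the derivative of the activation function,

    \frac{d}{dv}\tanh(v) = \frac{1}{\cosh^2(v)} = 1 - \tanh^2(v)

so an equivalent form that avoids the extra cosh call would be 1 - tanh(v)*tanh(v); that variant is a suggestion only, not part of this commit.
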
@@ -200,13 +201,19 @@ class NeuroNet {
  public:
   NeuroNet() {
   }
-  Matrix<TYPE, 1, OUTPUT_LAYERS> feed(Matrix<TYPE, 1, INPUT_LAYERS> in) {
+  Matrix<TYPE, 1, OUTPUT_LAYERS> operator()(const Matrix<TYPE, 1, INPUT_LAYERS>& in) {
     Matrix<TYPE, 1, HIDDEN_LAYER_SIZE> l((in*_wi).apply(ACTIVATION));
     for (int i(0); i<HIDDEN_LAYERS-1; ++i)
       l = (l*_wh[i]).apply(ACTIVATION);
     Matrix<TYPE, 1, OUTPUT_LAYERS> out((l*_wo).apply(ACTIVATION));
     return out;
   }
+  Matrix<TYPE, 1, OUTPUT_LAYERS> learn(const Matrix<TYPE, 1, INPUT_LAYERS>& in,
+                                       const Matrix<TYPE, 1, OUTPUT_LAYERS>& expect) {
+    Matrix<TYPE, 1, OUTPUT_LAYERS> out((*this)(in));
+    Matrix<TYPE, 1, OUTPUT_LAYERS> diff(expect-out);
+    return diff;
+  }
  private:
   Matrix<TYPE, INPUT_LAYERS, HIDDEN_LAYER_SIZE> _wi;
   Matrix<TYPE, HIDDEN_LAYER_SIZE, HIDDEN_LAYER_SIZE> _wh[HIDDEN_LAYERS-1];
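
The new learn() only computes the per-sample output error (expect-out) so far; the weight update itself is still missing, as the commit message says. For orientation, here is a standalone sketch, independent of the NeuroNet and Matrix classes, of the gradient step a single tanh output weight would need; all names (x, w, expect, rate) are illustrative only:

    // one input, one weight, one tanh output neuron; plain C++, no Matrix class
    #include <cmath>
    #include <iostream>

    int main() {
      double x(0.5), w(0.1), expect(1.0), rate(0.1);
      for (int step(0); step<100; ++step) {
        double out(std::tanh(x*w));       // forward pass, as in operator()
        double diff(expect-out);          // error, as returned by learn()
        double grad(diff*(1-out*out)*x);  // uses tanh'(x*w) = 1 - tanh^2(x*w)
        w += rate*grad;                   // gradient descent on the squared error
      }
      std::cout<<"trained output: "<<std::tanh(x*w)<<std::endl;
      return 0;
    }

Extending this to the full network means propagating diff backwards through _wo, _wh and _wi, which is what the tanh_diff helper above is prepared for.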

test/neuron.cxx

@@ -31,12 +31,16 @@ class NeuroNetTest: public CppUnit::TestFixture {
      1,
      1,
      -1};
+    for (int step(0); step<10; ++step)
+      for (int i(0); i<sizeof(in)/sizeof(*in); ++i)
+        for (int rep(0); rep<10; ++rep)
+          auto res(neuronet.learn(in[i], out[i]));
     for (int i(0); i<sizeof(in)/sizeof(*in); ++i) {
       std::cout<<in[i]<<""<<out[i]<<" ~ "
-               <<neuronet.feed(in[i]).apply([](float&v){
+               <<neuronet(in[i]).apply([](float&v){
                  v = v<0 ? -1.0 : 1.0;
                })<<std::endl;
-      auto res(neuronet.feed(in[i])
+      auto res(neuronet(in[i])
               .apply([](float&v){
                  std::cout<<"v="<<v<<std::endl;
                  v = v<0 ? -1.0 : 1.0;
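
Once learn() actually updates the weights, the printing loop could become a real check, for example (a sketch only, assuming that Matrix provides an element-wise operator== usable inside CPPUNIT_ASSERT):

    for (int i(0); i<sizeof(in)/sizeof(*in); ++i) {
      auto res(neuronet(in[i]).apply([](float&v){v = v<0 ? -1.0 : 1.0;}));
      CPPUNIT_ASSERT(res==out[i]); // assumed: element-wise Matrix comparison
    }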
