diff --git a/COPYING b/COPYING
index 88798ab..caeca07 120000
--- a/COPYING
+++ b/COPYING
@@ -1 +1 @@
-/usr/share/automake-1.15/COPYING
\ No newline at end of file
+/usr/share/automake-1.14/COPYING
\ No newline at end of file
diff --git a/ChangeLog b/ChangeLog
index d8e9b83..b427b51 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,16 @@
+2017-01-08 08:55
+
+        * [r12] bootstrap.sh, debian/changelog.in:
+          rebuilt debian/changelog.in
+
+2017-01-07 14:06
+
+        * [r11] COPYING, ChangeLog, INSTALL, ax_init_standard_project.m4,
+          bootstrap.sh, build-in-docker.conf, build-in-docker.sh,
+          debian/control.in, doc/plantuml.jar[ADD],
+          resolve-debbuilddeps.sh, resolve-rpmbuilddeps.sh:
+          build system updated
+
 2016-10-30 20:16
 
         * [r10] configure.ac:
diff --git a/INSTALL b/INSTALL
index ddcdb76..f812f5a 120000
--- a/INSTALL
+++ b/INSTALL
@@ -1 +1 @@
-/usr/share/automake-1.15/INSTALL
\ No newline at end of file
+/usr/share/automake-1.14/INSTALL
\ No newline at end of file
diff --git a/src/neuron.hxx b/src/neuron.hxx
index e9629b1..7ccaec0 100644
--- a/src/neuron.hxx
+++ b/src/neuron.hxx
@@ -6,6 +6,7 @@
 // 45678901234567890123456789012345678901234567890123456789012345678901234567890
 
 #include <matrix.hxx>
+#include <math.h>
 
 /** @mainpage Neural Network with Hidden Layers
@@ -66,9 +67,9 @@
     @c l of hidden layers, where each of them contains @c h neurons.
 
-    A neural network with double precision is inistialized as:
+    A neural network with double precision is initialized as:
     @code
-      NeuroNet net;
+      NeuroNet<double> net;
     @endcode
 
     @dot
@@ -133,35 +134,81 @@
     to the first hidden layer, @c H2 from the first to the second,
     and so on, until @c Hl+1 contains the weights from layer @c l to
     the output @c O.
+
+    There is also an activation function @c f. For back propagation,
+    this function needs a first derivative @c f'.
+
+    To get the activation of the first hidden layer, the input vector
+    is multiplied with the weight matrix of the first hidden layer,
+    which results in an output vector. Then the activation function is
+    applied to all values of the output vector:
+
+    <pre>
+    V1 = f(I×H1)
+    </pre>
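+
+    For example, a minimal numeric sketch, assuming two inputs, two
+    hidden neurons and tanh as @c f, with weights chosen only for
+    illustration:
+
+    <pre>
+    I = (1 -1),  columns of H1: (0.5 0.25) and (-0.5 0.75)
+    I×H1 = (1·0.5 + (-1)·0.25,  1·(-0.5) + (-1)·0.75) = (0.25 -1.25)
+    V1 = (tanh(0.25)  tanh(-1.25)) ≈ (0.245 -0.848)
+    </pre>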
+
+    This is done for all layers, up to the output. The output vector
+    is then calculated as:
-    The output vector is then calculated as:
-    O = I × H1 × H2 × … × Hl+1
+
+    <pre>
+    O = f(f(…f(f(I×H1)×H2)…×Hl)×Hl+1)
+    </pre>
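+
+    With @c l = 2 hidden layers, for instance, the nesting expands to:
+
+    <pre>
+    V1 = f(I×H1)
+    V2 = f(V1×H2)
+    O  = f(V2×H3)
+    </pre>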
 
     @code
       const size_type i(4);
       const size_type o(2);
       NeuroNet<double, i, o> net;
       Matrix<1, i> input(1.0, 2.0, 0.0, -1.0);
-      Matrix<1, o> output = net(input);
+      Matrix<1, o> output = net.feed(input);
     @endcode
 
     @section neuro-backward Back Propagation
 
     @page biblio Bibliography
 
+    - Artificial Neural Networks: Matrix Form (Part 5)
     - Vorlesung Neuronale Netze - Zusammenfassung - Christoph Tornau
     - Neuronale Netze — Eine Einführung
     - Artificial Neural Network based Curve Prediction
     - Convolutional Neural Networks (CNNs / ConvNets)
-    - TensorFlow utorials
+    - TensorFlow Tutorials
     - Artificial Neural Network based Curve Prediction
 */
+
+namespace math {
+  // hyperbolic tangent as the standard activation function
+  template<typename TYPE> TYPE tanh(const TYPE& v) {
+    return ::tanh((double)v);
+  }
+  // first derivative of the activation function, for back propagation
+  template<typename TYPE> TYPE tanh_diff(const TYPE& v) {
+    TYPE ch(::cosh((double)v));
+    return 1/(ch*ch);
+  }
+}
+
+template<typename TYPE = double,
+         size_t INPUT_LAYERS = 2,
+         size_t OUTPUT_LAYERS = 1,
+         size_t HIDDEN_LAYERS = 1,
+         size_t HIDDEN_LAYER_SIZE = INPUT_LAYERS+OUTPUT_LAYERS,
+         TYPE(*ACTIVATION)(const TYPE&) = math::tanh<TYPE>,
+         TYPE(*ACTIVATION_DIFF)(const TYPE&) = math::tanh_diff<TYPE>>
 class NeuroNet {
+  public:
+    NeuroNet() {
+    }
+    Matrix<TYPE, 1, OUTPUT_LAYERS> feed(Matrix<TYPE, 1, INPUT_LAYERS> in) {
+      Matrix<TYPE, 1, HIDDEN_LAYER_SIZE> l((in*_wi).apply(ACTIVATION));
+      for (int i(0); i<HIDDEN_LAYERS-1; ++i)
+        l = (l*_wh[i]).apply(ACTIVATION);
+      Matrix<TYPE, 1, OUTPUT_LAYERS> out((l*_wo).apply(ACTIVATION));
+      return out;
+    }
+  private:
+    Matrix<TYPE, INPUT_LAYERS, HIDDEN_LAYER_SIZE> _wi;
+    Matrix<TYPE, HIDDEN_LAYER_SIZE, HIDDEN_LAYER_SIZE> _wh[HIDDEN_LAYERS-1];
+    Matrix<TYPE, HIDDEN_LAYER_SIZE, OUTPUT_LAYERS> _wo;
 };
diff --git a/test/neuron.cxx b/test/neuron.cxx
index ed3c6ea..7ea2a9b 100644
--- a/test/neuron.cxx
+++ b/test/neuron.cxx
@@ -8,7 +8,7 @@
 //      1         2         3         4         5         6         7         8
 // 45678901234567890123456789012345678901234567890123456789012345678901234567890
 
-
+#include <neuron.hxx>
 #include <cppunit/TestFixture.h>
 #include <cppunit/extensions/HelperMacros.h>
 
@@ -19,15 +19,36 @@
 /// @todo Rename DummyTest and DummyTest::dummy()
 /// @todo Write test cases
 
-class DummyTest: public CppUnit::TestFixture {
+class NeuroNetTest: public CppUnit::TestFixture {
   public:
-    void dummy() {
+    void simplexor() {
+      NeuroNet<double, 2, 1> neuronet;
+      Matrix<double, 1, 2> in[] = {{1, 1},
+                                   {1, -1},
+                                   {-1, 1},
+                                   {-1, -1}};
+      Matrix<double, 1, 1> out[] = {-1,
+                                    1,
+                                    1,
+                                    -1};
+      for (int i(0); i