Lightweight Neural Network++ documentation

network.h

#ifndef NETWORK_H
#define NETWORK_H
/*
 * Lightweight Neural Net ++
 * http://lwneuralnetplus.sourceforge.net/
 *
 * This C++ library provides the class network, which implements a
 * feed-forward neural network with backpropagation.
 * Either the logistic or the tanh function can be used as the sigmoidal
 * activation. The library provides on-line training, momentum,
 * batch training and SuperSAB training.
 *
 * By Lorenzo Masetti <lorenzo.masetti@libero.it> and Luca Cinti <lucacinti@supereva.it>
 * Based on the lwneuralnet C library by Peter Van Rossum <petervr@debian.org>, Luca Cinti and Lorenzo Masetti
 * http://lwneuralnet.sourceforge.net
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 */
#define NET_LOGISTIC 0
#define NET_TANH 1


#include <stdio.h>
#include <stdexcept>
#include <iostream>
#include <vector>
using namespace std;
class network
{

public:

  /* Constants identifying the sigmoidal activation function */
  static const int LOGISTIC = NET_LOGISTIC;
  static const int TANH = NET_TANH;
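  /*
   * Both activations follow the standard definitions (noted here for
   * reference; not part of the original header):
   *   logistic: f(x) = 1 / (1 + exp(-x)), with outputs in (0, 1)
   *   tanh:     f(x) = tanh(x),           with outputs in (-1, 1)
   */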

  /* Constructor: sigmoidal function (LOGISTIC or TANH), number of layers,
   * then the number of neurons of each layer as variadic arguments */
  network (int activ, int no_of_layers, ...);

  /* Constructor taking the layer sizes as a vector */
  network (int activ, vector<int> layers);

  /* Constructor loading the network from a file (binary format by
   * default, text format otherwise) */
  network (const char *filename, bool binary = true);

  /* Copy constructor */
  network (const network & b);

  /* Destructor */
  ~network ();

  /* Randomize the weights of the network */
  void randomize (float range);

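  /*
   * Usage sketch (illustrative, not part of the original header): building
   * a 2-3-1 network with the variadic constructor and randomizing its
   * weights:
   *
   *   network net (network::LOGISTIC, 3, 2, 3, 1);
   *   net.randomize (0.5);
   *
   * The vector-based constructor builds the same topology:
   *
   *   vector<int> sizes;
   *   sizes.push_back (2);
   *   sizes.push_back (3);
   *   sizes.push_back (1);
   *   network net2 (network::LOGISTIC, sizes);
   */
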
  /****************************************
   * Accessors
   ****************************************/

  float get_momentum () const;

  float get_learning_rate () const;

  int get_no_of_inputs () const;

  int get_no_of_outputs () const;

  int get_no_of_layers () const;

  int get_no_of_neurons (int l) const;

  float get_weight (int l, int nl, int nu) const;

  int get_no_of_patterns () const;

  int get_activation () const;

  float get_output_error () const;

  /* Accessors for parameters of SuperSab */

  float get_max_learning_rate ();

  float get_min_learning_rate ();

  float get_ssab_up_factor ();

  float get_ssab_down_factor ();

  /****************************************
   * Mutators
   ****************************************/

  void set_learning_rate (float learning_rate);

  void set_activation (int num_func);

  void set_momentum (float momentum);

  /* Mutators for the parameters of SuperSab training */

  void set_max_learning_rate (float max);

  void set_min_learning_rate (float min);

  void set_ssab_up_factor (float factor);

  void set_ssab_down_factor (float factor);

  /****************************************
   * File I/O for binary files
   ****************************************/

  void save (const char *filename) const;

  void load (const char *filename);

  /****************************************
   * Friendly printing
   ****************************************/

  void friendly_print (const bool show = false) const;

  /****************************************
   * File I/O for Text Files
   ****************************************/

  /* Please note that the text file format is provided for compatibility
   * with the old lwneuralnet format and should normally not be used.
   * However, the textual format can be useful for portability between
   * machines that use different binary encodings for floating point
   * numbers.
   *
   * NOTE FOR LWNEURALNET USERS:
   *
   * Text files containing networks created by lwneuralnet might have a
   * different format, which does not store the number of the sigmoidal
   * function as the first item. Since in old versions the logistic
   * function was the only one provided, you can convert those files with
   * the command
   * echo 0 > newfile.net; cat oldfile.net >> newfile.net
   *
   * Starting from version 0.88 the textload method provides a solution
   * to this problem: if the first number in the text file is >= 2, it is
   * interpreted as the number of layers and the function is set to
   * logistic.
   */

  void print () const;

  void textsave (const char *filename) const;

  void textload (const char *filename);

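  /*
   * I/O sketch (assumed usage, based on the declarations above): a binary
   * save/reload round trip, plus the text format for portability:
   *
   *   net.save ("trained.net");       // binary format
   *   network copy ("trained.net");   // reload (binary is the default)
   *
   *   net.textsave ("trained.txt");   // portable text format
   *   net.textload ("trained.txt");
   */
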
  /****************************************
   * Errors
   *
   * Before calling these routines, compute() should have been called to
   * compute the outputs for a given input. These routines compare the
   * actual output of the neural network (which is stored internally in
   * the neural network) with the intended output (in target).
   *
   ****************************************/

  float compute_output_error (const float *target);

  float compute_average_error (const float *target) const;

  float compute_quadratic_error (const float *target) const;

  float compute_max_error (const float *target) const;

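  /*
   * Error-measure sketch (assumed usage): after compute(), the stored
   * outputs can be compared with a target using any of the measures
   * above:
   *
   *   net.compute (input, output);
   *   float avg = net.compute_average_error (target);
   *   float max = net.compute_max_error (target);
   */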

  /****************************************
   * Evaluation and Training
   ****************************************/

  void compute (const float *input, float *output);

  void train ();

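  /*
   * On-line training sketch (assumed usage): each step computes the
   * outputs for one pattern, evaluates the error against the target, and
   * backpropagates it:
   *
   *   float input[2]  = { 0.0f, 1.0f };
   *   float target[1] = { 1.0f };
   *   float output[1];
   *
   *   net.set_learning_rate (0.3f);
   *   net.set_momentum (0.9f);
   *   for (int i = 0; i < 1000; i++) {
   *     net.compute (input, output);
   *     net.compute_output_error (target);
   *     net.train ();
   *   }
   */
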
  /****************************************
   * SuperSab
   ****************************************/

  bool is_ssab_active () const;

  int count_weights () const;

  int begin_ssab ();

  void train_ssab ();

  int reset_ssab ();

  void free_ssab ();

  bool save_ssab (const char *filename) const;

  bool load_ssab (const char *filename);

  int ssab_print_nus () const;

  int ssab_stats (float &average, float &max, float &min, int &n_max, int &n_min);

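  /*
   * SuperSAB sketch (assumed usage): begin_ssab() allocates the per-weight
   * adaptive learning rates; train_ssab() then takes the place of train()
   * in the on-line training loop:
   *
   *   net.begin_ssab ();
   *   for (int i = 0; i < 1000; i++) {
   *     net.compute (input, output);
   *     net.compute_output_error (target);
   *     net.train_ssab ();
   *   }
   *   net.free_ssab ();
   */
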
  /****************************************
   * Batch Training
   ****************************************/

  void begin_batch ();

  void train_batch ();

  void end_batch ();

  void end_batch_ssab ();

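  /*
   * Batch-training sketch (assumed usage; inputs, targets and n_patterns
   * are hypothetical names): weight changes are accumulated over the
   * whole pattern set and applied once per epoch:
   *
   *   for (int epoch = 0; epoch < 1000; epoch++) {
   *     net.begin_batch ();
   *     for (int p = 0; p < n_patterns; p++) {
   *       net.compute (inputs[p], output);
   *       net.compute_output_error (targets[p]);
   *       net.train_batch ();
   *     }
   *     net.end_batch ();   // or end_batch_ssab() when SuperSAB is active
   *   }
   */
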
  /****************************************
   * Modification
   ****************************************/

  void jolt (float factor, float range);

  /****************************************
   * Overloaded operators
   ****************************************/

  const network & operator= (const network & b);

  /* PRIVATE */

private:
  /* [Internal]
   * Structs for neurons and layers
   */

  typedef struct
  {
    float output;
    float error;
    float *weight;
    float *delta;
    float *sumdeltas;
  }
  neuron_t;

  typedef struct
  {
    int no_of_neurons;
    neuron_t *neuron;
  }
  layer_t;

  void reset_deltas ();
  void reset_sumdeltas ();
  void reset_deltas_and_sumdeltas ();

  void allocate_layer (layer_t * layer, int no_of_neurons);

  void allocate_weights (layer_t * lower, layer_t * upper);
  void allocate_l (int act, int layers, const int *arglist);

  void fbprint (FILE * file) const;
  void fbscan (FILE * file);

  void do_load (const char *filename);
  void do_textload (const char *filename);

  void fprint (FILE * file) const;
  void fscan (FILE * file);

  void set_input (const float *input);
  void get_output (float *output);

  static float sigmoidal (float x, int num_func);
  void propagate_layer (layer_t * lower, layer_t * upper);
  void forward_pass ();
  void backpropagate_layer (layer_t * lower, layer_t * upper);

  void backward_pass ();
  void adjust_weights ();

  void adjust_weights_ssab ();

  int fprint_ssab (FILE * file) const;
  int fscan_ssab (FILE * file);

  void adjust_sumdeltas_batch ();
  void adjust_weights_batch ();
  void adjust_weights_batch_ssab ();

  void copy (const network & b);
  void destroy ();

  int no_of_layers;
  float momentum;
  float learning_rate;
  float global_error;
  int no_of_patterns;
  layer_t *layer;
  layer_t *input_layer;
  layer_t *output_layer;
  int activation;
  float *nus;

  float maxnu;
  float minnu;
  float nuup;
  float nudown;

  /* operator<< is declared friend because it needs to access private fields */
  friend ostream & operator<< (ostream &, const network &);

};

ostream & operator<< (ostream & os, const network & net);

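/*
 * Printing sketch (assumed usage): the overloaded stream operator writes
 * a dump of the network to any ostream:
 *
 *   network net (network::LOGISTIC, 3, 2, 3, 1);
 *   cout << net;
 */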

/****************************************
 * IMPLEMENTATION OF INLINE FUNCTIONS
 * ACCESSORS AND MUTATORS
 ****************************************/


/****************************************
 * Accessors
 ****************************************/

inline float
network::get_momentum () const
{
  return momentum;
}

inline float
network::get_learning_rate () const
{
  return learning_rate;
}

inline int
network::get_no_of_inputs () const
{
  return input_layer->no_of_neurons;
}

inline int
network::get_no_of_outputs () const
{
  return output_layer->no_of_neurons;
}

inline int
network::get_no_of_layers () const
{
  return no_of_layers;
}

inline int
network::get_no_of_patterns () const
{
  return no_of_patterns;
}

inline int
network::get_activation () const
{
  return activation;
}

inline float
network::get_output_error () const
{
  return global_error;
}

inline bool
network::is_ssab_active () const
{
  return (nus != NULL);
}

inline float
network::get_max_learning_rate ()
{
  return maxnu;
}

inline float
network::get_min_learning_rate ()
{
  return minnu;
}

inline float
network::get_ssab_up_factor ()
{
  return nuup;
}

inline float
network::get_ssab_down_factor ()
{
  return nudown;
}

/****************************************
 * Mutators
 ****************************************/

inline void
network::set_learning_rate (float the_learning_rate)
{
  learning_rate = the_learning_rate;
}

inline void
network::set_momentum (float the_momentum)
{
  momentum = the_momentum;
}

inline void
network::set_ssab_up_factor (float factor)
{
  nuup = factor;
}

inline void
network::set_ssab_down_factor (float factor)
{
  nudown = factor;
}

#endif /* NETWORK_H */
