// @(#)root/tmva $Id: MethodCFMlpANN_Utils.h 23334 2008-04-19 18:38:57Z brun $
// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss

/*********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis     *
 * Package: TMVA                                                                 *
 * Class  : MethodCFMlpANN_Utils                                                 *
 * Web    : http://tmva.sourceforge.net                                          *
 *                                                                               *
 * Reference for the original FORTRAN version "mlpl3.F":                         *
 *      Authors  : J. Proriol and contributions from ALEPH-Clermont-Fd           *
 *                 Team members                                                  *
 *      Copyright: Laboratoire Physique Corpusculaire                            *
 *                 Universite Blaise Pascal, IN2P3/CNRS                          *
 *                                                                               *
 * Description:                                                                  *
 *      Utility routines, obtained via f2c from the original mlpl3.F FORTRAN     *
 *      routine                                                                  *
 *                                                                               *
 * Authors (alphabetical):                                                       *
 *      Andreas Hoecker  - CERN, Switzerland                                     *
 *      Xavier Prudent   - LAPP, France                                          *
 *      Helge Voss       - MPI-K Heidelberg, Germany                             *
 *      Kai Voss         - U. of Victoria, Canada                                *
 *                                                                               *
 * Copyright (c) 2005:                                                           *
 *      CERN, Switzerland                                                        *
 *      U. of Victoria, Canada                                                   *
 *      MPI-K Heidelberg, Germany                                                *
 *      LAPP, Annecy, France                                                     *
 *                                                                               *
 * Redistribution and use in source and binary forms, with or without            *
 * modification, are permitted according to the terms listed in LICENSE          *
 * (http://tmva.sourceforge.net/LICENSE)                                         *
 *********************************************************************************/

#ifndef ROOT_TMVA_MethodCFMlpANN_Utils
#define ROOT_TMVA_MethodCFMlpANN_Utils

#ifndef ROOT_TMVA_MethodCFMlpANN_def
#include "TMVA/MethodCFMlpANN_def.h"
#endif
#ifndef ROOT_TObject
#include "TObject.h"
#endif

#include <cstdio>  // printf(), used by the inline error handling below

//////////////////////////////////////////////////////////////////////////
//                                                                        //
// MethodCFMlpANN_Utils                                                   //
//                                                                        //
// Implementation of Clermont-Ferrand artificial neural network           //
//                                                                        //
//////////////////////////////////////////////////////////////////////////

namespace TMVA {

   class MethodCFMlpANN_Utils {

   public:

      MethodCFMlpANN_Utils();
      virtual ~MethodCFMlpANN_Utils();

   protected:

      void Train_nn( Double_t *tin2, Double_t *tout2, Int_t *ntrain, Int_t *ntest,
                     Int_t *nvar2, Int_t *nlayer, Int_t *nodes, Int_t *ncycle );

      void Entree_new( Int_t *, char *, Int_t *ntrain, Int_t *ntest, Int_t *numlayer,
                       Int_t *nodes, Int_t *numcycle, Int_t );

      virtual Int_t DataInterface( Double_t*, Double_t*, Int_t*, Int_t*, Int_t*, Int_t*,
                                   Double_t*, Int_t*, Int_t* ) = 0;

      Double_t Fdecroi( Int_t *i__ );
      Double_t Sen3a( void );

      void Wini();
      void En_avant( Int_t *ievent );
      void En_avant2( Int_t *ievent );
      void En_arriere( Int_t *ievent );
      void Leclearn( Int_t *ktest, Double_t *tout2, Double_t *tin2 );
      void Out( Int_t *iii, Int_t *maxcycle );
      void Cout( Int_t *, Double_t *xxx );
      void Innit( char *det, Double_t *tout2, Double_t *tin2, Int_t );
      void TestNN();
      void Inl();
      void GraphNN( Int_t *ilearn, Double_t *, Double_t *, char *, Int_t );
      void Foncf( Int_t *i__, Double_t *u, Double_t *f );
      void Cout2( Int_t * /*i1*/, Double_t *yyy );
      void Lecev2( Int_t *ktest, Double_t *tout2, Double_t *tin2 );
      void Arret( const char* mot );
      void CollectVar( Int_t *nvar, Int_t *class__, Double_t *xpg );

   protected:

      static Int_t fg_100;          // constant
      static Int_t fg_0;            // constant
      static Int_t fg_max_nVar_;    // static maximum number of input variables
      static Int_t fg_max_nNodes_;  // maximum number of nodes per variable
      static Int_t fg_999;          // constant
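
      // The W_ref/Ww_ref accessors below reproduce the 1-based Fortran array
      // indexing of the f2c-translated code: a Fortran reference w(a_1,a_2,a_3),
      // with first dimension max_nLayers_ and second dimension max_nNodes_, is
      // flattened into a plain C-array lookup. In the f2c convention the constant
      // subtracted at the end (187 for W_ref, 7 for Ww_ref) is the value of the
      // index expression at the first element, so that w(1,1,1) and ww(1,1) map
      // to C index 0. Worked example (a sketch only; it assumes max_nLayers_ = 6
      // as defined in MethodCFMlpANN_def.h):
      //
      //    Ww_ref( ww, 1, 1 )  ->  ww[ 1*max_nLayers_ + 1 - 7 ]  =  ww[0]
      //    Ww_ref( ww, 2, 1 )  ->  ww[ 1*max_nLayers_ + 2 - 7 ]  =  ww[1]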
      Double_t W_ref( const Double_t wNN[], Int_t a_1, Int_t a_2, Int_t a_3 ) const {
         return wNN [(a_3*max_nNodes_ + a_2)*max_nLayers_ + a_1 - 187];
      }
      Double_t& W_ref( Double_t wNN[], Int_t a_1, Int_t a_2, Int_t a_3 ) {
         return wNN [((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187];
      }
      Double_t Ww_ref( const Double_t wwNN[], Int_t a_1, Int_t a_2 ) const {
         return wwNN[(a_2)*max_nLayers_ + a_1 - 7];
      }
      Double_t& Ww_ref( Double_t wwNN[], Int_t a_1, Int_t a_2 ) {
         return wwNN[(a_2)*max_nLayers_ + a_1 - 7];
      }

      // ANN training parameters
      struct {
         Double_t epsmin, epsmax, eeps, eta;
         Int_t layerm, lclass, nevl, nblearn, nunilec, nunisor, nunishort, nunap;
         Int_t nvar, itest, ndiv, ichoi, ndivis, nevt;
      } fParam_1;

      // ANN training results
      struct {
         Double_t xmax[max_nVar_], xmin[max_nVar_];
         Int_t nclass[max_Events_], mclass[max_Events_], iclass;
      } fVarn_1;

      // dynamic data table
      class VARn2 {
      public:
         VARn2() { fxx = 0; fNevt = 0; fNvar = 0; }
         ~VARn2() { Delete(); }

         void Create( Int_t nevt, Int_t nvar ) {
            fNevt = nevt+1; fNvar = nvar+1; // fortran array style 1...N
            fxx = new Double_t*[fNevt];
            for (Int_t i=0; i<fNevt; i++) fxx[i] = new Double_t[fNvar];
         }

         // element access with Fortran-style (1-based) indices; on an invalid
         // index it prints an error and falls back to fxx[0][0] (exit disabled)
         Double_t& operator()( Int_t ievt, Int_t ivar ) {
            if (0 != fxx && ievt < fNevt && ivar < fNvar) return fxx[ievt][ivar];
            else {
               printf( "*** ERROR: fxx is zero pointer ==> abort ***\n" );
               // exit(1);
               return fxx[0][0];
            }
         }

         void Delete( void ) {
            if (0 != fxx) {
               for (Int_t i=0; i<fNevt; i++) delete [] fxx[i];
               delete [] fxx;
               fxx = 0;
            }
         }

         Double_t** fxx;   // [event][variable] data table
         Int_t      fNevt; // number of events + 1 (Fortran-style upper bound)
         Int_t      fNvar; // number of variables + 1 (Fortran-style upper bound)
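
         // Usage sketch (illustrative only; "table", "nEvents", "nVars", "ievt"
         // and "ivar" are placeholder names, not part of the TMVA interface):
         //
         //    VARn2 table;
         //    table.Create( nEvents, nVars );   // allocates (nEvents+1) x (nVars+1)
         //    table( ievt, ivar ) = 0.5;        // Fortran-style 1-based indices
         //    Double_t x = table( ievt, ivar );
         //    table.Delete();                   // also invoked by the destructor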