#include <iostream>
#include <string>
#include <vector>
#include <iomanip>
#include <stdlib.h>
#include "Riostream.h"
#include "TMath.h"
#include "TMatrix.h"
#include "TObjString.h"
#include "TMVA/MethodCFMlpANN.h"
#include "TMVA/MethodCFMlpANN_def.h"
ClassImp(TMVA::MethodCFMlpANN)
namespace TMVA {
// event counter used by the data interface to the translated training routine
Int_t MethodCFMlpANN_nsel = 0;
}
// pointer to the current instance; retrieved via This() inside DataInterface
TMVA::MethodCFMlpANN* TMVA::MethodCFMlpANN::fgThis = 0;
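// standard constructor: parse the options and cache the training events in a local
// matrix (fData) together with their type (fClass: 1 = signal, 2 = background)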
TMVA::MethodCFMlpANN::MethodCFMlpANN( const TString& jobName, const TString& methodTitle, DataSet& theData,
const TString& theOption, TDirectory* theTargetDir )
: TMVA::MethodBase( jobName, methodTitle, theData, theOption, theTargetDir ),
fData(0),
fClass(0),
fNlayers(0),
fNcycles(0),
fNodes(0),
fYNN(0),
fLayerSpec("")
{
InitCFMlpANN();
SetConfigName( TString("Method") + GetMethodName() );
DeclareOptions();
ParseOptions();
ProcessOptions();
fLogger << "Use " << fNcycles << " training cycles" << Endl;
if (HasTrainingTree()) {
Int_t nEvtTrain = Data().GetNEvtTrain();
fData = new TMatrix( nEvtTrain, GetNvar() );
fClass = new std::vector<Int_t>( nEvtTrain );
Int_t ivar;
for (Int_t ievt=0; ievt<nEvtTrain; ievt++) {
ReadTrainingEvent(ievt);
(*fClass)[ievt] = IsSignalEvent() ? 1 : 2;
for (ivar=0; ivar<GetNvar(); ivar++) {
(*fData)( ievt, ivar ) = GetEventVal(ivar);
}
}
fLogger << kVERBOSE << Data().GetNEvtSigTrain() << " signal and "
<< Data().GetNEvtBkgdTrain() << " background events in trainingTree" << Endl;
}
}
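// constructor used when the method is built from an existing weight file;
// no training data are cached here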
TMVA::MethodCFMlpANN::MethodCFMlpANN( DataSet & theData,
const TString& theWeightFile,
TDirectory* theTargetDir )
: TMVA::MethodBase( theData, theWeightFile, theTargetDir ),
fData(0),
fClass(0),
fNlayers(0),
fNcycles(0),
fNodes(0),
fYNN(0)
{
InitCFMlpANN();
DeclareOptions();
}
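// define the options understood by this method:
// NCycles      - number of training cycles
// HiddenLayers - hidden layer architecture, e.g. "N-1,N-2" (N = number of input variables)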
void TMVA::MethodCFMlpANN::DeclareOptions()
{
DeclareOptionRef( fNcycles = 3000, "NCycles", "Number of training cycles" );
DeclareOptionRef( fLayerSpec = "N-1,N-2", "HiddenLayers", "Specification of the hidden layer architecture (N stands for the number of input variables; any integer may also be used)" );
}
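// decode the layer specification string into fNodes: the input layer has GetNvar()
// nodes, each comma-separated token defines one hidden layer, and the output layer
// always has 2 nodes (the array is allocated with a fixed maximum of 20 layers)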
void TMVA::MethodCFMlpANN::ProcessOptions()
{
MethodBase::ProcessOptions();
fNodes = new Int_t[20];
fNlayers = 2;
Int_t currentHiddenLayer = 1;
TString layerSpec(fLayerSpec);
while(layerSpec.Length()>0) {
TString sToAdd = "";
if (layerSpec.First(',')<0) {
sToAdd = layerSpec;
layerSpec = "";
}
else {
sToAdd = layerSpec(0,layerSpec.First(','));
layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
}
Int_t nNodes = 0;
if (sToAdd.BeginsWith("N") || sToAdd.BeginsWith("n")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
nNodes += atoi(sToAdd);
fNodes[currentHiddenLayer++] = nNodes;
fNlayers++;
}
fNodes[0] = GetNvar();
fNodes[fNlayers-1] = 2;
fLogger << kINFO << "Use configuration (nodes per layer): in=";
for (Int_t i=0; i<fNlayers-1; i++) fLogger << kINFO << fNodes[i] << ":";
fLogger << kINFO << fNodes[fNlayers-1] << "=out" << Endl;
}
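// default initialisation: set method name and type, request normalised inputs and
// reset the static event counter used by the data interface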
void TMVA::MethodCFMlpANN::InitCFMlpANN( void )
{
SetMethodName( "CFMlpANN" );
SetMethodType( TMVA::Types::kCFMlpANN );
SetTestvarName();
SetNormalised( kTRUE );
fgThis = this;
TMVA::MethodCFMlpANN_nsel = 0;
}
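// destructor: free the cached training data, the node array and the network output buffers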
TMVA::MethodCFMlpANN::~MethodCFMlpANN( void )
{
delete fData;
delete fClass;
delete[] fNodes;
if (fYNN!=0) {
for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
delete[] fYNN;
fYNN=0;
}
}
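// training: copy the layer configuration into a plain array and hand it, together with
// the number of training events and cycles, to the Train_nn routine of the translated
// network code (not available on Windows)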
void TMVA::MethodCFMlpANN::Train( void )
{
if (!CheckSanity()) fLogger << kFATAL << "<Train> sanity check failed" << Endl;
Double_t dumDat(0);
Int_t ntrain(Data().GetNEvtTrain());
Int_t ntest(0);
Int_t nvar(GetNvar());
Int_t nlayers(fNlayers);
Int_t *nodes = new Int_t[nlayers];
Int_t ncycles(fNcycles);
for (Int_t i=0; i<nlayers; i++) nodes[i] = fNodes[i];
#ifndef R__WIN32
Train_nn( &dumDat, &dumDat, &ntrain, &ntest, &nvar, &nlayers, nodes, &ncycles );
#else
fLogger << kWARNING << "<Train> Sorry, CFMlpANN is not yet implemented on Windows" << Endl;
#endif
delete [] nodes;
}
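// return the neural network output for the current event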
Double_t TMVA::MethodCFMlpANN::GetMvaValue()
{
Bool_t isOK = kTRUE;
std::vector<Double_t> inputVec( GetNvar() );
for (Int_t ivar=0; ivar<GetNvar(); ivar++) inputVec[ivar] = GetEventVal(ivar);
Double_t myMVA = EvalANN( inputVec, isOK );
if (!isOK) fLogger << kFATAL << "EvalANN returned isOK = kFALSE for current event" << Endl;
return myMVA;
}
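// evaluate the network for a given input vector: each variable is clamped to the
// [xmin,xmax] range seen during training and rescaled to [-1,1]; the response of the
// output neuron (in [-1,1]) is finally mapped to [0,1]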
Double_t TMVA::MethodCFMlpANN::EvalANN( std::vector<Double_t>& inVar, Bool_t& isOK )
{
Double_t* xeev = new Double_t[GetNvar()];
for (Int_t ivar=0; ivar<GetNvar(); ivar++) xeev[ivar] = inVar[ivar];
isOK = kTRUE;
for (Int_t jvar=0; jvar<GetNvar(); jvar++) {
if (fVarn_1.xmax[jvar] < xeev[jvar]) xeev[jvar] = fVarn_1.xmax[jvar];
if (fVarn_1.xmin[jvar] > xeev[jvar]) xeev[jvar] = fVarn_1.xmin[jvar];
if (fVarn_1.xmax[jvar] == fVarn_1.xmin[jvar]) {
isOK = kFALSE;
xeev[jvar] = 0;
}
else {
xeev[jvar] = xeev[jvar] - ((fVarn_1.xmax[jvar] + fVarn_1.xmin[jvar])/2);
xeev[jvar] = xeev[jvar] / ((fVarn_1.xmax[jvar] - fVarn_1.xmin[jvar])/2);
}
}
NN_ava( xeev );
Double_t retval = 0.5*(1.0 + fYNN[fParam_1.layerm-1][0]);
delete [] xeev;
return retval;
}
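// forward propagation: layer 0 holds the normalised inputs; every neuron of the
// following layers receives its bias (Ww_ref) plus the weighted sum (W_ref) of the
// previous layer, passed through the activation function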
void TMVA::MethodCFMlpANN::NN_ava( Double_t* xeev )
{
for (Int_t ivar=0; ivar<fNeur_1.neuron[0]; ivar++) fYNN[0][ivar] = xeev[ivar];
for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
for (Int_t j=1; j<=fNeur_1.neuron[layer]; j++) {
Double_t x = Ww_ref(fNeur_1.ww, layer+1,j);
for (Int_t k=1; k<=fNeur_1.neuron[layer-1]; k++) {
x += fYNN[layer-1][k-1]*W_ref(fNeur_1.w, layer+1, j, k);
}
fYNN[layer][j-1] = NN_fonc( layer, x );
}
}
}
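// activation function of layer i with temperature T = fDel_1.temp[i]:
// f(u) = (1 - exp(-u/T)) / (1 + exp(-u/T)) = tanh(u/(2T)),
// clipped to +-1 when |u/T| exceeds 170 to protect the exponential from overflowing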
Double_t TMVA::MethodCFMlpANN::NN_fonc( Int_t i, Double_t u ) const
{
Double_t f(0);
if (u/fDel_1.temp[i] > 170) f = +1;
else if (u/fDel_1.temp[i] < -170) f = -1;
else {
Double_t yy = TMath::Exp(-u/fDel_1.temp[i]);
f = (1 - yy)/(1 + yy);
}
return f;
}
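// read the network from a plain-text weight stream: variable ranges, number of layers,
// neurons per layer, then biases and weights in blocks of at most 10 neurons (the same
// layout written by WriteWeightsToStream), and finally the layer temperatures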
void TMVA::MethodCFMlpANN::ReadWeightsFromStream( istream & istr )
{
TString var("");
Int_t nva(0), lclass(0);
istr >> nva >> lclass;
if (GetNvar() != nva)
fLogger << kFATAL << "<ReadWeightsFromFile> mismatch in number of variables" << Endl;
if (lclass != 2)
fLogger << kFATAL << "<ReadWeightsFromFile> mismatch in number of classes" << Endl;
if (istr.eof( ))
fLogger << kFATAL << "<ReadWeightsFromStream> reached EOF prematurely " << Endl;
for (Int_t ivar=0; ivar<GetNvar(); ivar++)
istr >> fVarn_1.xmax[ivar] >> fVarn_1.xmin[ivar];
istr >> fParam_1.layerm;
if (fYNN != 0) {
for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
delete[] fYNN;
fYNN = 0;
}
fYNN = new Double_t*[fParam_1.layerm];
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
istr >> fNeur_1.neuron[layer];
fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
}
const Int_t nchar( 100 );
char* dumchar = new char[nchar];
for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
Int_t nq = fNeur_1.neuron[layer]/10;
Int_t nr = fNeur_1.neuron[layer] - nq*10;
Int_t kk(0);
if (nr==0) kk = nq;
else kk = nq+1;
for (Int_t k=1; k<=kk; k++) {
Int_t jmin = 10*k - 9;
Int_t jmax = 10*k;
if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
for (Int_t j=jmin; j<=jmax; j++) {
istr >> Ww_ref(fNeur_1.ww, layer+1, j);
}
for (Int_t i=1; i<=fNeur_1.neuron[layer-1]; i++) {
for (Int_t j=jmin; j<=jmax; j++) {
istr >> W_ref(fNeur_1.w, layer+1, j, i);
}
}
istr.getline( dumchar, nchar );
}
}
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
istr.getline( dumchar, nchar );
istr.getline( dumchar, nchar );
istr >> fDel_1.temp[layer];
}
if (GetNvar() != fNeur_1.neuron[0]) {
fLogger << kFATAL << "<ReadWeightsFromFile> mismatch in zeroth layer:"
<< GetNvar() << " " << fNeur_1.neuron[0] << Endl;
}
delete[] dumchar;
fNlayers = fParam_1.layerm;
}
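// data interface used by the translated training code: for the event indexed by the
// global counter MethodCFMlpANN_nsel it returns the class (iclass) and fills the input
// variables into xpg, then advances the counter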
Int_t TMVA::MethodCFMlpANN::DataInterface( Double_t* , Double_t* ,
Int_t* , Int_t* ,
Int_t* , Int_t* nvar,
Double_t* xpg, Int_t* iclass, Int_t* ikend )
{
*ikend = 0;
TMVA::MethodCFMlpANN* opt = TMVA::MethodCFMlpANN::This();
if (0 == xpg) {
fLogger << kFATAL << "ERROR in MethodCFMlpANN_DataInterface zero pointer xpg" << Endl;
}
if (*nvar != opt->GetNvar()) {
fLogger << kFATAL << "ERROR in MethodCFMlpANN_DataInterface mismatch in num of variables: "
<< *nvar << " " << opt->GetNvar() << Endl;
}
*iclass = (int)opt->GetClass( TMVA::MethodCFMlpANN_nsel );
for (Int_t ivar=0; ivar<opt->GetNvar(); ivar++)
xpg[ivar] = (double)opt->GetData( TMVA::MethodCFMlpANN_nsel, ivar );
++TMVA::MethodCFMlpANN_nsel;
return 0;
}
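// write the network to a plain-text stream in the format expected by ReadWeightsFromStream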
void TMVA::MethodCFMlpANN::WriteWeightsToStream( std::ostream& o ) const
{
o << fParam_1.nvar << " " << fParam_1.lclass << endl;
if (fParam_1.lclass != 2)
fLogger << kFATAL << "<WriteWeightsToStream> mismatch in number of classes" << Endl;
for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
o << fVarn_1.xmax[ivar] << " " << fVarn_1.xmin[ivar] << endl;
o << fParam_1.layerm << endl;
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
o << fNeur_1.neuron[layer] << " ";
}
o << endl;
for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
Int_t nq = fNeur_1.neuron[layer]/10;
Int_t nr = fNeur_1.neuron[layer] - nq*10;
Int_t kk(0);
if (nr==0) kk = nq;
else kk = nq+1;
for (Int_t k=1; k<=kk; k++) {
Int_t jmin = 10*k - 9;
Int_t jmax = 10*k;
Int_t i, j;
if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
for (j=jmin; j<=jmax; j++) {
o << setw(20) << Ww_ref(fNeur_1.ww, layer+1, j) << " ";
}
o << endl;
for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
for (j=jmin; j<=jmax; j++) {
o << setw(20) << W_ref(fNeur_1.w, layer+1, j, i) << " ";
}
o << endl;
}
o << endl << endl;
}
}
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
o << endl << endl;
o << fDel_1.temp[layer] << endl;
}
}
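// human-readable printout of the network structure, variable ranges, weights and temperatures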
void TMVA::MethodCFMlpANN::PrintWeights( std::ostream & o ) const
{
o << "Number of vars " << fParam_1.nvar << endl;
o << "Output nodes " << fParam_1.lclass << endl;
for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
o << "Var " << ivar << " [" << fVarn_1.xmin[ivar] << " - " << fVarn_1.xmax[ivar] << "]" << endl;
o << "Number of layers " << fParam_1.layerm << endl;
o << "Nodes per layer ";
for (Int_t layer=0; layer<fParam_1.layerm; layer++)
o << fNeur_1.neuron[layer] << " ";
o << endl;
for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
Int_t nq = fNeur_1.neuron[layer]/10;
Int_t nr = fNeur_1.neuron[layer] - nq*10;
Int_t kk(0);
if (nr==0) kk = nq;
else kk = nq+1;
for (Int_t k=1; k<=kk; k++) {
Int_t jmin = 10*k - 9;
Int_t jmax = 10*k;
Int_t i, j;
if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
for (j=jmin; j<=jmax; j++) {
o << Ww_ref(fNeur_1.ww, layer+1, j) << " ";
}
o << endl;
for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
for (j=jmin; j<=jmax; j++) {
o << W_ref(fNeur_1.w, layer+1, j, i) << " ";
}
o << endl;
}
o << endl;
}
}
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
o << "Del.temp in layer " << layer << " : " << fDel_1.temp[layer] << endl;
}
}
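// write method-specific classifier response code; not implemented for this method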
void TMVA::MethodCFMlpANN::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
fout << " // not implemented for class: \"" << className << "\"" << endl;
fout << "};" << endl;
}
void TMVA::MethodCFMlpANN::MakeClassSpecificHeader( std::ostream& , const TString& ) const
{
}
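// get help message text (no detailed description available for this method)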
void TMVA::MethodCFMlpANN::GetHelpMessage() const
{
fLogger << Endl;
fLogger << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
fLogger << Endl;
fLogger << "Sorry - not available" << Endl;
fLogger << Endl;
fLogger << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl;
fLogger << Endl;
fLogger << "Sorry - not available" << Endl;
fLogger << Endl;
fLogger << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl;
fLogger << Endl;
fLogger << "Sorry - not available" << Endl;
}