/*************************************************************************
Copying of neural network (second parameter is passed as shared object).

INPUT PARAMETERS:
    Network1 -   original

OUTPUT PARAMETERS:
    Network2 -   copy

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
public static void mlpcopyshared(multilayerperceptron network1,
    multilayerperceptron network2)
{
    int wcount = 0;
    int i = 0;
    hpccores.mlpbuffers buf = new hpccores.mlpbuffers();
    smlpgrad sgrad = new smlpgrad();

    //
    // Copy scalar and array fields
    //
    network2.hlnetworktype = network1.hlnetworktype;
    network2.hlnormtype = network1.hlnormtype;
    apserv.copyintegerarray(network1.hllayersizes, ref network2.hllayersizes);
    apserv.copyintegerarray(network1.hlconnections, ref network2.hlconnections);
    apserv.copyintegerarray(network1.hlneurons, ref network2.hlneurons);
    apserv.copyintegerarray(network1.structinfo, ref network2.structinfo);
    apserv.copyrealarray(network1.weights, ref network2.weights);
    apserv.copyrealarray(network1.columnmeans, ref network2.columnmeans);
    apserv.copyrealarray(network1.columnsigmas, ref network2.columnsigmas);
    apserv.copyrealarray(network1.neurons, ref network2.neurons);
    apserv.copyrealarray(network1.dfdnet, ref network2.dfdnet);
    apserv.copyrealarray(network1.derror, ref network2.derror);
    apserv.copyrealarray(network1.x, ref network2.x);
    apserv.copyrealarray(network1.y, ref network2.y);
    apserv.copyrealarray(network1.nwbuf, ref network2.nwbuf);
    apserv.copyintegerarray(network1.integerbuf, ref network2.integerbuf);

    //
    // Copy buffers
    //
    wcount = mlpgetweightscount(network1);
    alglib.smp.ae_shared_pool_set_seed(network2.buf, buf);
    sgrad.g = new double[wcount];
    sgrad.f = 0.0;
    for(i=0; i<=wcount-1; i++)
    {
        sgrad.g[i] = 0.0;
    }
    alglib.smp.ae_shared_pool_set_seed(network2.gradbuf, sgrad);
}
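/*************************************************************************
Usage sketch (illustrative, not part of the original source). "Passed as
shared object" in the header above means the destination is an already-
constructed multilayerperceptron whose fields are overwritten in place:
the arrays are copied and the buf/gradbuf shared pools are reseeded
rather than re-created. The method name below is hypothetical;
mlpcreate1() is assumed to be the single-hidden-layer constructor from
this unit.
*************************************************************************/
private static void mlpcopyshared_example()
{
    multilayerperceptron net1 = new multilayerperceptron();
    multilayerperceptron net2 = new multilayerperceptron();

    // Source network: 2 inputs, 5 hidden neurons, 1 output
    mlpcreate1(2, 5, 1, net1);

    // net2 already exists as an object; every field is overwritten,
    // so it does not need matching (or any) geometry beforehand.
    mlpcopyshared(net1, net2);
}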
/*************************************************************************
Internal subroutine.

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
private static void mlpcreate(int nin,
    int nout,
    int[] lsizes,
    int[] ltypes,
    int[] lconnfirst,
    int[] lconnlast,
    int layerscount,
    bool isclsnet,
    multilayerperceptron network)
{
    int i = 0;
    int j = 0;
    int ssize = 0;
    int ntotal = 0;
    int wcount = 0;
    int offs = 0;
    int nprocessed = 0;
    int wallocated = 0;
    int[] localtemp = new int[0];
    int[] lnfirst = new int[0];
    int[] lnsyn = new int[0];
    hpccores.mlpbuffers buf = new hpccores.mlpbuffers();
    smlpgrad sgrad = new smlpgrad();

    //
    // Check
    //
    alglib.ap.assert(layerscount>0, "MLPCreate: wrong parameters!");
    alglib.ap.assert(ltypes[0]==-2, "MLPCreate: wrong LTypes[0] (must be -2)!");
    for(i=0; i<=layerscount-1; i++)
    {
        alglib.ap.assert(lsizes[i]>0, "MLPCreate: wrong LSizes!");
        alglib.ap.assert(lconnfirst[i]>=0 && (lconnfirst[i]<i || i==0), "MLPCreate: wrong LConnFirst!");
        alglib.ap.assert(lconnlast[i]>=lconnfirst[i] && (lconnlast[i]<i || i==0), "MLPCreate: wrong LConnLast!");
    }

    //
    // Build network geometry
    //
    lnfirst = new int[layerscount-1+1];
    lnsyn = new int[layerscount-1+1];
    ntotal = 0;
    wcount = 0;
    for(i=0; i<=layerscount-1; i++)
    {
        //
        // Analyze connections.
        // This code must throw an assertion in case of unknown LTypes[I]
        //
        lnsyn[i] = -1;
        if( ltypes[i]>=0 || ltypes[i]==-5 )
        {
            lnsyn[i] = 0;
            for(j=lconnfirst[i]; j<=lconnlast[i]; j++)
            {
                lnsyn[i] = lnsyn[i]+lsizes[j];
            }
        }
        else
        {
            if( (ltypes[i]==-2 || ltypes[i]==-3) || ltypes[i]==-4 )
            {
                lnsyn[i] = 0;
            }
        }
        alglib.ap.assert(lnsyn[i]>=0, "MLPCreate: internal error #0!");

        //
        // Other info
        //
        lnfirst[i] = ntotal;
        ntotal = ntotal+lsizes[i];
        if( ltypes[i]==0 )
        {
            wcount = wcount+lnsyn[i]*lsizes[i];
        }
    }
    ssize = 7+ntotal*nfieldwidth;

    //
    // Allocate
    //
    network.structinfo = new int[ssize-1+1];
    network.weights = new double[wcount-1+1];
    if( isclsnet )
    {
        network.columnmeans = new double[nin-1+1];
        network.columnsigmas = new double[nin-1+1];
    }
    else
    {
        network.columnmeans = new double[nin+nout-1+1];
        network.columnsigmas = new double[nin+nout-1+1];
    }
    network.neurons = new double[ntotal-1+1];
    network.nwbuf = new double[Math.Max(wcount, 2*nout)-1+1];
    network.integerbuf = new int[3+1];
    network.dfdnet = new double[ntotal-1+1];
    network.x = new double[nin-1+1];
    network.y = new double[nout-1+1];
    network.derror = new double[ntotal-1+1];

    //
    // Fill structure: global info
    //
    network.structinfo[0] = ssize;
    network.structinfo[1] = nin;
    network.structinfo[2] = nout;
    network.structinfo[3] = ntotal;
    network.structinfo[4] = wcount;
    network.structinfo[5] = 7;
    if( isclsnet )
    {
        network.structinfo[6] = 1;
    }
    else
    {
        network.structinfo[6] = 0;
    }

    //
    // Fill structure: neuron connections
    //
    nprocessed = 0;
    wallocated = 0;
    for(i=0; i<=layerscount-1; i++)
    {
        for(j=0; j<=lsizes[i]-1; j++)
        {
            offs = network.structinfo[5]+nprocessed*nfieldwidth;
            network.structinfo[offs+0] = ltypes[i];
            if( ltypes[i]==0 )
            {
                //
                // Adaptive summator:
                // * connections with weights to previous neurons
                //
                network.structinfo[offs+1] = lnsyn[i];
                network.structinfo[offs+2] = lnfirst[lconnfirst[i]];
                network.structinfo[offs+3] = wallocated;
                wallocated = wallocated+lnsyn[i];
                nprocessed = nprocessed+1;
            }
            if( ltypes[i]>0 || ltypes[i]==-5 )
            {
                //
                // Activation layer:
                // * each neuron connected to one (only one) of previous neurons.
                // * no weights
                //
                network.structinfo[offs+1] = 1;
                network.structinfo[offs+2] = lnfirst[lconnfirst[i]]+j;
                network.structinfo[offs+3] = -1;
                nprocessed = nprocessed+1;
            }
            if( (ltypes[i]==-2 || ltypes[i]==-3) || ltypes[i]==-4 )
            {
                nprocessed = nprocessed+1;
            }
        }
    }
    alglib.ap.assert(wallocated==wcount, "MLPCreate: internal error #1!");
    alglib.ap.assert(nprocessed==ntotal, "MLPCreate: internal error #2!");

    //
    // Fill weights by small random values
    // Initialize means and sigmas
    //
    for(i=0; i<=nin-1; i++)
    {
        network.columnmeans[i] = 0;
        network.columnsigmas[i] = 1;
    }
    if( !isclsnet )
    {
        for(i=0; i<=nout-1; i++)
        {
            network.columnmeans[nin+i] = 0;
            network.columnsigmas[nin+i] = 1;
        }
    }
    mlprandomize(network);

    //
    // Seed buffers
    //
    alglib.smp.ae_shared_pool_set_seed(network.buf, buf);
    sgrad.g = new double[wcount];
    sgrad.f = 0.0;
    for(i=0; i<=wcount-1; i++)
    {
        sgrad.g[i] = 0.0;
    }
    alglib.smp.ae_shared_pool_set_seed(network.gradbuf, sgrad);
}
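/*************************************************************************
Illustrative sketch (not part of the original source): how the layer
description arrays map onto mlpcreate(). Judging from the checks above,
the LTypes convention is:

    -2 .... input layer (must be layer 0)
     0 .... adaptive summator (weighted connections, adds to WCount)
    >0 .... nonlinear activation layer (one-to-one with its source, no weights)
    -5 .... linear activation layer (one-to-one, no weights)
    -3/-4 . other special zero-synapse layer types handled by the code above

The hypothetical helper below hand-builds the simplest valid geometry --
NIn inputs feeding one adaptive summator followed by a linear activation
layer -- without the bias neurons the library's own constructors normally
insert. NIn and NOut are assumed positive.
*************************************************************************/
private static void mlpcreate_example(int nin, int nout, multilayerperceptron network)
{
    int[] lsizes = new int[]{ nin, nout, nout };      // layer widths
    int[] ltypes = new int[]{ -2, 0, -5 };            // input, summator, linear
    int[] lconnfirst = new int[]{ 0, 0, 1 };          // each layer reads from
    int[] lconnlast = new int[]{ 0, 0, 1 };           // exactly one source layer
    mlpcreate(nin, nout, lsizes, ltypes, lconnfirst, lconnlast, 3, false, network);
}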
public override alglib.apobject make_copy()
{
    smlpgrad _result = new smlpgrad();
    _result.f = f;
    _result.g = (double[])g.Clone();
    return _result;
}
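/*************************************************************************
Usage note (illustrative, not part of the original source): make_copy()
is the deep-copy hook declared by alglib.apobject; shared pools such as
network.gradbuf, seeded above, rely on it to hand out independent
per-thread instances of the seed. That is why the gradient vector is
Clone()d rather than assigned: each worker must own its own accumulator.
The hypothetical method below demonstrates the aliasing this prevents.
*************************************************************************/
private static void smlpgrad_copy_example()
{
    smlpgrad seed = new smlpgrad();
    seed.f = 0.0;
    seed.g = new double[]{ 0.0, 0.0, 0.0 };

    // Deep copy: mutating the copy's gradient leaves the seed intact.
    smlpgrad copy = (smlpgrad)seed.make_copy();
    copy.g[0] = 1.0;

    // seed.g[0] is still 0.0 here; a shallow "copy.g = seed.g"
    // would have changed it to 1.0 as well.
}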