// PetaVision Alpha
// RescaleLayer.cpp
1 /*
2  * RescaleLayer.cpp
3  */
4 
5 #include "RescaleLayer.hpp"
6 #include <stdio.h>
7 
8 #include "../include/default_params.h"
9 
10 namespace PV {
11 RescaleLayer::RescaleLayer() { initialize_base(); }
12 
13 RescaleLayer::RescaleLayer(const char *name, HyPerCol *hc) {
14  initialize_base();
15  initialize(name, hc);
16 }
17 
18 RescaleLayer::~RescaleLayer() { free(rescaleMethod); }
19 
20 int RescaleLayer::initialize_base() {
21  originalLayer = NULL;
22  targetMax = 1;
23  targetMin = -1;
24  targetMean = 0;
25  targetStd = 1;
26  rescaleMethod = NULL;
27  patchSize = 1;
28  return PV_SUCCESS;
29 }
30 
31 int RescaleLayer::initialize(const char *name, HyPerCol *hc) {
32  int status_init = CloneVLayer::initialize(name, hc);
33 
34  return status_init;
35 }
36 
37 Response::Status
38 RescaleLayer::communicateInitInfo(std::shared_ptr<CommunicateInitInfoMessage const> message) {
39  return CloneVLayer::communicateInitInfo(message);
40  // CloneVLayer sets originalLayer and errors out if originalLayerName is not valid
41 }
42 
43 // Rescale layer does not use the V buffer, so absolutely fine to clone off of an null V layer
44 void RescaleLayer::allocateV() {
45  // Do nothing
46 }
47 
48 int RescaleLayer::ioParamsFillGroup(enum ParamsIOFlag ioFlag) {
50  ioParam_rescaleMethod(ioFlag);
51  if (strcmp(rescaleMethod, "maxmin") == 0) {
52  ioParam_targetMax(ioFlag);
53  ioParam_targetMin(ioFlag);
54  }
55  else if (strcmp(rescaleMethod, "meanstd") == 0) {
56  ioParam_targetMean(ioFlag);
57  ioParam_targetStd(ioFlag);
58  }
59  else if (strcmp(rescaleMethod, "pointmeanstd") == 0) {
60  ioParam_targetMean(ioFlag);
61  ioParam_targetStd(ioFlag);
62  }
63  else if (strcmp(rescaleMethod, "l2") == 0) {
64  ioParam_patchSize(ioFlag);
65  }
66  else if (strcmp(rescaleMethod, "l2NoMean") == 0) {
67  ioParam_patchSize(ioFlag);
68  }
69  else if (strcmp(rescaleMethod, "pointResponseNormalization") == 0) {
70  }
71  else if (strcmp(rescaleMethod, "zerotonegative") == 0) {
72  }
73  else if (strcmp(rescaleMethod, "softmax") == 0) {
74  }
75  else if (strcmp(rescaleMethod, "logreg") == 0) {
76  }
77  else {
78  Fatal().printf(
79  "RescaleLayer \"%s\": rescaleMethod does not exist. Current implemented methods are "
80  "maxmin, meanstd, pointmeanstd, pointResponseNormalization, softmax, l2, l2NoMean, and "
81  "logreg.\n",
82  name);
83  }
84  return PV_SUCCESS;
85 }
86 
87 void RescaleLayer::ioParam_rescaleMethod(enum ParamsIOFlag ioFlag) {
88  parent->parameters()->ioParamStringRequired(ioFlag, name, "rescaleMethod", &rescaleMethod);
89 }
90 
91 void RescaleLayer::ioParam_targetMax(enum ParamsIOFlag ioFlag) {
92  assert(!parent->parameters()->presentAndNotBeenRead(name, "rescaleMethod"));
93  if (strcmp(rescaleMethod, "maxmin") == 0) {
94  parent->parameters()->ioParamValue(ioFlag, name, "targetMax", &targetMax, targetMax);
95  }
96 }
97 
98 void RescaleLayer::ioParam_targetMin(enum ParamsIOFlag ioFlag) {
99  assert(!parent->parameters()->presentAndNotBeenRead(name, "rescaleMethod"));
100  if (strcmp(rescaleMethod, "maxmin") == 0) {
101  parent->parameters()->ioParamValue(ioFlag, name, "targetMin", &targetMin, targetMin);
102  }
103 }
104 
105 void RescaleLayer::ioParam_targetMean(enum ParamsIOFlag ioFlag) {
106  assert(!parent->parameters()->presentAndNotBeenRead(name, "rescaleMethod"));
107  if ((strcmp(rescaleMethod, "meanstd") == 0) || (strcmp(rescaleMethod, "pointmeanstd") == 0)) {
108  parent->parameters()->ioParamValue(ioFlag, name, "targetMean", &targetMean, targetMean);
109  }
110 }
111 
112 void RescaleLayer::ioParam_targetStd(enum ParamsIOFlag ioFlag) {
113  assert(!parent->parameters()->presentAndNotBeenRead(name, "rescaleMethod"));
114  if ((strcmp(rescaleMethod, "meanstd") == 0) || (strcmp(rescaleMethod, "pointmeanstd") == 0)) {
115  parent->parameters()->ioParamValue(ioFlag, name, "targetStd", &targetStd, targetStd);
116  }
117 }
118 
119 void RescaleLayer::ioParam_patchSize(enum ParamsIOFlag ioFlag) {
120  assert(!parent->parameters()->presentAndNotBeenRead(name, "rescaleMethod"));
121  if (strcmp(rescaleMethod, "l2") == 0 || strcmp(rescaleMethod, "l2NoMean") == 0) {
122  parent->parameters()->ioParamValue(ioFlag, name, "patchSize", &patchSize, patchSize);
123  }
124 }
125 
126 int RescaleLayer::setActivity() {
127  float *activity = clayer->activity->data;
128  memset(activity, 0, sizeof(float) * clayer->numExtendedAllBatches);
129  return 0;
130 }
131 
132 // GTK: changed to rescale activity instead of V
133 Response::Status RescaleLayer::updateState(double timef, double dt) {
134  int numNeurons = originalLayer->getNumNeurons();
135  float *A = clayer->activity->data;
136  const float *originalA = originalLayer->getCLayer()->activity->data;
137  const PVLayerLoc *loc = getLayerLoc();
138  const PVLayerLoc *locOriginal = originalLayer->getLayerLoc();
139  int nbatch = loc->nbatch;
140  // Make sure all sizes match
141  assert(locOriginal->nx == loc->nx);
142  assert(locOriginal->ny == loc->ny);
143  assert(locOriginal->nf == loc->nf);
144 
145  for (int b = 0; b < nbatch; b++) {
146  const float *originalABatch = originalA + b * originalLayer->getNumExtended();
147  float *ABatch = A + b * getNumExtended();
148 
149  if (strcmp(rescaleMethod, "maxmin") == 0) {
150  float maxA = -1000000000;
151  float minA = 1000000000;
152  // Find max and min of A
153  for (int k = 0; k < numNeurons; k++) {
154  int kextOriginal = kIndexExtended(
155  k,
156  locOriginal->nx,
157  locOriginal->ny,
158  locOriginal->nf,
159  locOriginal->halo.lt,
160  locOriginal->halo.rt,
161  locOriginal->halo.dn,
162  locOriginal->halo.up);
163  if (originalABatch[kextOriginal] > maxA) {
164  maxA = originalABatch[kextOriginal];
165  }
166  if (originalABatch[kextOriginal] < minA) {
167  minA = originalABatch[kextOriginal];
168  }
169  }
170 
171  MPI_Allreduce(
172  MPI_IN_PLACE,
173  &maxA,
174  1,
175  MPI_FLOAT,
176  MPI_MAX,
177  parent->getCommunicator()->communicator());
178  MPI_Allreduce(
179  MPI_IN_PLACE,
180  &minA,
181  1,
182  MPI_FLOAT,
183  MPI_MIN,
184  parent->getCommunicator()->communicator());
185 
186  float rangeA = maxA - minA;
187  if (rangeA != 0) {
188 #ifdef PV_USE_OPENMP_THREADS
189 #pragma omp parallel for
190 #endif // PV_USE_OPENMP_THREADS
191  for (int k = 0; k < numNeurons; k++) {
192  int kExt = kIndexExtended(
193  k,
194  loc->nx,
195  loc->ny,
196  loc->nf,
197  loc->halo.lt,
198  loc->halo.rt,
199  loc->halo.dn,
200  loc->halo.up);
201  int kExtOriginal = kIndexExtended(
202  k,
203  locOriginal->nx,
204  locOriginal->ny,
205  locOriginal->nf,
206  locOriginal->halo.lt,
207  locOriginal->halo.rt,
208  locOriginal->halo.dn,
209  locOriginal->halo.up);
210  ABatch[kExt] =
211  ((originalABatch[kExtOriginal] - minA) / rangeA) * (targetMax - targetMin)
212  + targetMin;
213  }
214  }
215  else {
216 #ifdef PV_USE_OPENMP_THREADS
217 #pragma omp parallel for
218 #endif // PV_USE_OPENMP_THREADS
219  for (int k = 0; k < numNeurons; k++) {
220  int kExt = kIndexExtended(
221  k,
222  loc->nx,
223  loc->ny,
224  loc->nf,
225  loc->halo.lt,
226  loc->halo.rt,
227  loc->halo.dn,
228  loc->halo.up);
229  ABatch[kExt] = (float)0;
230  }
231  }
232  }
233  else if (strcmp(rescaleMethod, "meanstd") == 0) {
234  float sum = 0;
235  float sumsq = 0;
236 // Find sum of originalA
237 #ifdef PV_USE_OPENMP_THREADS
238 #pragma omp parallel for reduction(+ : sum)
239 #endif
240  for (int k = 0; k < numNeurons; k++) {
241  int kextOriginal = kIndexExtended(
242  k,
243  locOriginal->nx,
244  locOriginal->ny,
245  locOriginal->nf,
246  locOriginal->halo.lt,
247  locOriginal->halo.rt,
248  locOriginal->halo.dn,
249  locOriginal->halo.up);
250  sum += originalABatch[kextOriginal];
251  }
252 
253  MPI_Allreduce(
254  MPI_IN_PLACE,
255  &sum,
256  1,
257  MPI_FLOAT,
258  MPI_SUM,
259  parent->getCommunicator()->communicator());
260 
261  float mean = sum / originalLayer->getNumGlobalNeurons();
262 
263 // Find (val - mean)^2 of originalA
264 #ifdef PV_USE_OPENMP_THREADS
265 #pragma omp parallel for reduction(+ : sumsq)
266 #endif
267  for (int k = 0; k < numNeurons; k++) {
268  int kextOriginal = kIndexExtended(
269  k,
270  locOriginal->nx,
271  locOriginal->ny,
272  locOriginal->nf,
273  locOriginal->halo.lt,
274  locOriginal->halo.rt,
275  locOriginal->halo.dn,
276  locOriginal->halo.up);
277  sumsq += (originalABatch[kextOriginal] - mean) * (originalABatch[kextOriginal] - mean);
278  }
279 
280  MPI_Allreduce(
281  MPI_IN_PLACE,
282  &sumsq,
283  1,
284  MPI_FLOAT,
285  MPI_SUM,
286  parent->getCommunicator()->communicator());
287  float std = sqrtf(sumsq / originalLayer->getNumGlobalNeurons());
288  // The difference between the if and the else clauses is only in the computation of
289  // A[kext], but this
290  // way the std != 0.0 conditional is only evaluated once, not every time through the
291  // for-loop.
292  if (std != 0.0f) {
293 #ifdef PV_USE_OPENMP_THREADS
294 #pragma omp parallel for
295 #endif
296  for (int k = 0; k < numNeurons; k++) {
297  int kext = kIndexExtended(
298  k,
299  loc->nx,
300  loc->ny,
301  loc->nf,
302  loc->halo.lt,
303  loc->halo.rt,
304  loc->halo.up,
305  loc->halo.dn);
306  int kextOriginal = kIndexExtended(
307  k,
308  locOriginal->nx,
309  locOriginal->ny,
310  locOriginal->nf,
311  locOriginal->halo.lt,
312  locOriginal->halo.rt,
313  locOriginal->halo.dn,
314  locOriginal->halo.up);
315  ABatch[kext] =
316  ((originalABatch[kextOriginal] - mean) * (targetStd / std) + targetMean);
317  }
318  }
319  else {
320 #ifdef PV_USE_OPENMP_THREADS
321 #pragma omp parallel for
322 #endif
323  for (int k = 0; k < numNeurons; k++) {
324  int kext = kIndexExtended(
325  k,
326  loc->nx,
327  loc->ny,
328  loc->nf,
329  loc->halo.lt,
330  loc->halo.rt,
331  loc->halo.up,
332  loc->halo.dn);
333  int kextOriginal = kIndexExtended(
334  k,
335  locOriginal->nx,
336  locOriginal->ny,
337  locOriginal->nf,
338  locOriginal->halo.lt,
339  locOriginal->halo.rt,
340  locOriginal->halo.dn,
341  locOriginal->halo.up);
342  ABatch[kext] = originalABatch[kextOriginal];
343  }
344  }
345  }
346  else if (strcmp(rescaleMethod, "l2") == 0) {
347  float sum = 0;
348  float sumsq = 0;
349 // Find sum of originalA
350 #ifdef PV_USE_OPENMP_THREADS
351 #pragma omp parallel for reduction(+ : sum)
352 #endif
353  for (int k = 0; k < numNeurons; k++) {
354  int kextOriginal = kIndexExtended(
355  k,
356  locOriginal->nx,
357  locOriginal->ny,
358  locOriginal->nf,
359  locOriginal->halo.lt,
360  locOriginal->halo.rt,
361  locOriginal->halo.dn,
362  locOriginal->halo.up);
363  sum += originalABatch[kextOriginal];
364  }
365 
366  MPI_Allreduce(
367  MPI_IN_PLACE,
368  &sum,
369  1,
370  MPI_FLOAT,
371  MPI_SUM,
372  parent->getCommunicator()->communicator());
373 
374  float mean = sum / originalLayer->getNumGlobalNeurons();
375 
376 // Find (val - mean)^2 of originalA
377 #ifdef PV_USE_OPENMP_THREADS
378 #pragma omp parallel for reduction(+ : sumsq)
379 #endif
380  for (int k = 0; k < numNeurons; k++) {
381  int kextOriginal = kIndexExtended(
382  k,
383  locOriginal->nx,
384  locOriginal->ny,
385  locOriginal->nf,
386  locOriginal->halo.lt,
387  locOriginal->halo.rt,
388  locOriginal->halo.dn,
389  locOriginal->halo.up);
390  sumsq += (originalABatch[kextOriginal] - mean) * (originalABatch[kextOriginal] - mean);
391  }
392 
393  MPI_Allreduce(
394  MPI_IN_PLACE,
395  &sumsq,
396  1,
397  MPI_FLOAT,
398  MPI_SUM,
399  parent->getCommunicator()->communicator());
400  float std = sqrtf(sumsq / originalLayer->getNumGlobalNeurons());
401  // The difference between the if and the else clauses is only in the computation of
402  // A[kext], but this
403  // way the std != 0.0 conditional is only evaluated once, not every time through the
404  // for-loop.
405  if (std != 0.0f) {
406 #ifdef PV_USE_OPENMP_THREADS
407 #pragma omp parallel for
408 #endif
409  for (int k = 0; k < numNeurons; k++) {
410  int kext = kIndexExtended(
411  k,
412  loc->nx,
413  loc->ny,
414  loc->nf,
415  loc->halo.lt,
416  loc->halo.rt,
417  loc->halo.up,
418  loc->halo.dn);
419  int kextOriginal = kIndexExtended(
420  k,
421  locOriginal->nx,
422  locOriginal->ny,
423  locOriginal->nf,
424  locOriginal->halo.lt,
425  locOriginal->halo.rt,
426  locOriginal->halo.dn,
427  locOriginal->halo.up);
428  ABatch[kext] =
429  ((originalABatch[kextOriginal] - mean)
430  * (1.0f / (std * sqrtf((float)patchSize))));
431  }
432  }
433  else {
434  WarnLog() << "std of layer " << originalLayer->getName()
435  << " is 0, layer remains unchanged\n";
436 #ifdef PV_USE_OPENMP_THREADS
437 #pragma omp parallel for
438 #endif
439  for (int k = 0; k < numNeurons; k++) {
440  int kext = kIndexExtended(
441  k,
442  loc->nx,
443  loc->ny,
444  loc->nf,
445  loc->halo.lt,
446  loc->halo.rt,
447  loc->halo.up,
448  loc->halo.dn);
449  int kextOriginal = kIndexExtended(
450  k,
451  locOriginal->nx,
452  locOriginal->ny,
453  locOriginal->nf,
454  locOriginal->halo.lt,
455  locOriginal->halo.rt,
456  locOriginal->halo.dn,
457  locOriginal->halo.up);
458  ABatch[kext] = originalABatch[kextOriginal];
459  }
460  }
461  }
462  else if (strcmp(rescaleMethod, "l2NoMean") == 0) {
463  float sumsq = 0;
464 #ifdef PV_USE_OPENMP_THREADS
465 #pragma omp parallel for reduction(+ : sumsq)
466 #endif
467  for (int k = 0; k < numNeurons; k++) {
468  int kextOriginal = kIndexExtended(
469  k,
470  locOriginal->nx,
471  locOriginal->ny,
472  locOriginal->nf,
473  locOriginal->halo.lt,
474  locOriginal->halo.rt,
475  locOriginal->halo.dn,
476  locOriginal->halo.up);
477  sumsq += (originalABatch[kextOriginal]) * (originalABatch[kextOriginal]);
478  }
479 
480 #ifdef PV_USE_MPI
481  MPI_Allreduce(
482  MPI_IN_PLACE,
483  &sumsq,
484  1,
485  MPI_FLOAT,
486  MPI_SUM,
487  parent->getCommunicator()->communicator());
488 #endif // PV_USE_MPI
489 
490  float std = sqrt(sumsq / originalLayer->getNumGlobalNeurons());
491  // The difference between the if and the else clauses is only in the computation of
492  // A[kext], but this
493  // way the std != 0.0 conditional is only evaluated once, not every time through the
494  // for-loop.
495  if (std != 0.0f) {
496 #ifdef PV_USE_OPENMP_THREADS
497 #pragma omp parallel for
498 #endif
499  for (int k = 0; k < numNeurons; k++) {
500  int kext = kIndexExtended(
501  k,
502  loc->nx,
503  loc->ny,
504  loc->nf,
505  loc->halo.lt,
506  loc->halo.rt,
507  loc->halo.up,
508  loc->halo.dn);
509  int kextOriginal = kIndexExtended(
510  k,
511  locOriginal->nx,
512  locOriginal->ny,
513  locOriginal->nf,
514  locOriginal->halo.lt,
515  locOriginal->halo.rt,
516  locOriginal->halo.dn,
517  locOriginal->halo.up);
518  ABatch[kext] =
519  ((originalABatch[kextOriginal]) * (1.0f / (std * sqrtf((float)patchSize))));
520  }
521  }
522  else {
523  WarnLog() << "std of layer " << originalLayer->getName()
524  << " is 0, layer remains unchanged\n";
525 #ifdef PV_USE_OPENMP_THREADS
526 #pragma omp parallel for
527 #endif
528  for (int k = 0; k < numNeurons; k++) {
529  int kext = kIndexExtended(
530  k,
531  loc->nx,
532  loc->ny,
533  loc->nf,
534  loc->halo.lt,
535  loc->halo.rt,
536  loc->halo.up,
537  loc->halo.dn);
538  int kextOriginal = kIndexExtended(
539  k,
540  locOriginal->nx,
541  locOriginal->ny,
542  locOriginal->nf,
543  locOriginal->halo.lt,
544  locOriginal->halo.rt,
545  locOriginal->halo.dn,
546  locOriginal->halo.up);
547  ABatch[kext] = originalABatch[kextOriginal];
548  }
549  }
550  }
551  else if (strcmp(rescaleMethod, "pointResponseNormalization") == 0) {
552  int nx = loc->nx;
553  int ny = loc->ny;
554  int nf = loc->nf;
555  PVHalo const *halo = &loc->halo;
556  PVHalo const *haloOrig = &locOriginal->halo;
557 // Loop through all nx and ny
558 // each y value specifies a different target so ok to thread here (sum, sumsq are defined inside
559 // loop)
560 #ifdef PV_USE_OPENMP_THREADS
561 #pragma omp parallel for
562 #endif
563  for (int iY = 0; iY < ny; iY++) {
564  for (int iX = 0; iX < nx; iX++) {
565  // Find sum sq in feature space
566  float sumsq = 0;
567  for (int iF = 0; iF < nf; iF++) {
568  int kext = kIndex(
569  iX,
570  iY,
571  iF,
572  nx + haloOrig->lt + haloOrig->rt,
573  ny + haloOrig->dn + haloOrig->up,
574  nf);
575  sumsq += (originalABatch[kext]) * (originalABatch[kext]);
576  }
577  float divisor = sqrtf(sumsq);
578  // Difference in the if-part and else-part is only in the value assigned to A[kext],
579  // but this way the std != 0
580  // conditional does not have to be reevaluated every time through the for loop.
581  // can't pragma omp parallel the for loops because it was already parallelized in the
582  // outermost for-loop
583  if (divisor != 0) {
584  for (int iF = 0; iF < nf; iF++) {
585  int kextOrig = kIndex(
586  iX,
587  iY,
588  iF,
589  nx + haloOrig->lt + haloOrig->rt,
590  ny + haloOrig->dn + haloOrig->up,
591  nf);
592  int kext = kIndex(
593  iX, iY, iF, nx + halo->lt + halo->rt, ny + halo->dn + halo->up, nf);
594  ABatch[kext] = (originalABatch[kextOrig] / divisor);
595  }
596  }
597  else {
598  for (int iF = 0; iF < nf; iF++) {
599  int kextOrig = kIndex(
600  iX,
601  iY,
602  iF,
603  nx + haloOrig->lt + haloOrig->rt,
604  ny + haloOrig->dn + haloOrig->up,
605  nf);
606  int kext = kIndex(
607  iX, iY, iF, nx + halo->lt + halo->rt, ny + halo->dn + halo->up, nf);
608  ABatch[kext] = originalABatch[kextOrig];
609  }
610  }
611  }
612  }
613  }
614  else if (strcmp(rescaleMethod, "pointmeanstd") == 0) {
615  int nx = loc->nx;
616  int ny = loc->ny;
617  int nf = loc->nf;
618  PVHalo const *halo = &loc->halo;
619  PVHalo const *haloOrig = &locOriginal->halo;
620 // Loop through all nx and ny
621 // each y value specifies a different target so ok to thread here (sum, sumsq are defined inside
622 // loop)
623 #ifdef PV_USE_OPENMP_THREADS
624 #pragma omp parallel for
625 #endif
626  for (int iY = 0; iY < ny; iY++) {
627  for (int iX = 0; iX < nx; iX++) {
628  // Find sum and sum sq in feature space
629  float sum = 0;
630  float sumsq = 0;
631  for (int iF = 0; iF < nf; iF++) {
632  int kext = kIndex(
633  iX,
634  iY,
635  iF,
636  nx + haloOrig->lt + haloOrig->rt,
637  ny + haloOrig->dn + haloOrig->up,
638  nf);
639  sum += originalABatch[kext];
640  }
641  float mean = sum / nf;
642  for (int iF = 0; iF < nf; iF++) {
643  int kext = kIndex(
644  iX,
645  iY,
646  iF,
647  nx + haloOrig->lt + haloOrig->rt,
648  ny + haloOrig->dn + haloOrig->up,
649  nf);
650  sumsq += (originalABatch[kext] - mean) * (originalABatch[kext] - mean);
651  }
652  float std = sqrtf(sumsq / nf);
653  // Difference in the if-part and else-part is only in the value assigned to A[kext],
654  // but this way the std != 0
655  // conditional does not have to be reevaluated every time through the for loop.
656  // can't pragma omp parallel the for loops because it was already parallelized in the
657  // outermost for-loop
658  if (std != 0) {
659  for (int iF = 0; iF < nf; iF++) {
660  int kextOrig = kIndex(
661  iX,
662  iY,
663  iF,
664  nx + haloOrig->lt + haloOrig->rt,
665  ny + haloOrig->dn + haloOrig->up,
666  nf);
667  int kext = kIndex(
668  iX, iY, iF, nx + halo->lt + halo->rt, ny + halo->dn + halo->up, nf);
669  ABatch[kext] =
670  ((originalABatch[kextOrig] - mean) * (targetStd / std) + targetMean);
671  }
672  }
673  else {
674  for (int iF = 0; iF < nf; iF++) {
675  int kextOrig = kIndex(
676  iX,
677  iY,
678  iF,
679  nx + haloOrig->lt + haloOrig->rt,
680  ny + haloOrig->dn + haloOrig->up,
681  nf);
682  int kext = kIndex(
683  iX, iY, iF, nx + halo->lt + halo->rt, ny + halo->dn + halo->up, nf);
684  ABatch[kext] = originalABatch[kextOrig];
685  }
686  }
687  }
688  }
689  }
690  else if (strcmp(rescaleMethod, "softmax") == 0) {
691  int nx = loc->nx;
692  int ny = loc->ny;
693  int nf = loc->nf;
694  PVHalo const *halo = &loc->halo;
695  PVHalo const *haloOrig = &locOriginal->halo;
696 // Loop through all nx and ny
697 // each y value specifies a different target so ok to thread here (sum, sumsq are defined inside
698 // loop)
699 #ifdef PV_USE_OPENMP_THREADS
700 #pragma omp parallel for
701 #endif
702  for (int iY = 0; iY < ny; iY++) {
703  for (int iX = 0; iX < nx; iX++) {
704  float sumexpx = 0;
705  // To prevent overflow, we subtract the max raw value before taking the exponential
706  float maxvalue = FLT_MIN;
707  for (int iF = 0; iF < nf; iF++) {
708  int kextOrig = kIndex(
709  iX,
710  iY,
711  iF,
712  nx + haloOrig->lt + haloOrig->rt,
713  ny + haloOrig->dn + haloOrig->up,
714  nf);
715  maxvalue = std::max(maxvalue, originalABatch[kextOrig]);
716  }
717  for (int iF = 0; iF < nf; iF++) {
718  int kextOrig = kIndex(
719  iX,
720  iY,
721  iF,
722  nx + haloOrig->lt + haloOrig->rt,
723  ny + haloOrig->dn + haloOrig->up,
724  nf);
725  sumexpx += expf(originalABatch[kextOrig] - maxvalue);
726  }
727  // can't pragma omp parallel the for loops because it was already parallelized in the
728  // outermost for-loop
729  for (int iF = 0; iF < nf; iF++) {
730  int kextOrig = kIndex(
731  iX,
732  iY,
733  iF,
734  nx + haloOrig->lt + haloOrig->rt,
735  ny + haloOrig->dn + haloOrig->up,
736  nf);
737  int kext =
738  kIndex(iX, iY, iF, nx + halo->lt + halo->rt, ny + halo->dn + halo->up, nf);
739  if (sumexpx != 0.0f && sumexpx == sumexpx) { // Check for zero and NaN
740  ABatch[kext] = expf(originalABatch[kextOrig] - maxvalue) / sumexpx;
741  }
742  else {
743  ABatch[kext] = 0.0f;
744  }
745  assert(ABatch[kext] >= 0 && ABatch[kext] <= 1);
746  }
747  }
748  }
749  }
750  else if (strcmp(rescaleMethod, "logreg") == 0) {
751  int nx = loc->nx;
752  int ny = loc->ny;
753  int nf = loc->nf;
754 // Loop through all nx and ny
755 // each y value specifies a different target so ok to thread here (sum, sumsq are defined inside
756 // loop)
757 #ifdef PV_USE_OPENMP_THREADS
758 #pragma omp parallel for
759 #endif
760  for (int k = 0; k < numNeurons; k++) {
761  int kext = kIndexExtended(
762  k,
763  loc->nx,
764  loc->ny,
765  loc->nf,
766  loc->halo.lt,
767  loc->halo.rt,
768  loc->halo.up,
769  loc->halo.dn);
770  int kextOriginal = kIndexExtended(
771  k,
772  locOriginal->nx,
773  locOriginal->ny,
774  locOriginal->nf,
775  locOriginal->halo.lt,
776  locOriginal->halo.rt,
777  locOriginal->halo.dn,
778  locOriginal->halo.up);
779  ABatch[kext] = 1.0f / (1.0f + expf(originalABatch[kextOriginal]));
780  }
781  }
782  else if (strcmp(rescaleMethod, "zerotonegative") == 0) {
783  PVHalo const *halo = &loc->halo;
784  PVHalo const *haloOrig = &locOriginal->halo;
785 #ifdef PV_USE_OPENMP_THREADS
786 #pragma omp parallel for
787 #endif
788  for (int k = 0; k < numNeurons; k++) {
789  int kextOriginal = kIndexExtended(
790  k,
791  locOriginal->nx,
792  locOriginal->ny,
793  locOriginal->nf,
794  haloOrig->lt,
795  haloOrig->rt,
796  haloOrig->dn,
797  haloOrig->up);
798  int kext = kIndexExtended(
799  k, loc->nx, loc->ny, loc->nf, halo->lt, halo->rt, halo->dn, halo->up);
800  if (originalABatch[kextOriginal] == 0) {
801  ;
802  ABatch[kext] = -1;
803  }
804  else {
805  ABatch[kext] = originalABatch[kextOriginal];
806  }
807  }
808  }
809  }
810  return Response::SUCCESS;
811 }
812 
813 } // end namespace PV
// Cross-reference (from documentation extraction): RescaleLayer::ioParamsFillGroup
// overrides the virtual CloneVLayer::ioParamsFillGroup (defined in CloneVLayer.cpp:34).