PetaVision Alpha
FirmThresholdCostFnProbe.cpp
/*
 * FirmThresholdCostFnProbe.cpp
 *
 * Created on: Aug 14, 2015
 * Author: pschultz
 */

#include "FirmThresholdCostFnProbe.hpp"
#include "columns/HyPerCol.hpp"
#include "layers/ANNLayer.hpp" // To get VThresh and VWidth from targetLayer if it's an ANNLayer

namespace PV {

FirmThresholdCostFnProbe::FirmThresholdCostFnProbe() : AbstractNormProbe() { initialize_base(); }

FirmThresholdCostFnProbe::FirmThresholdCostFnProbe(const char *name, HyPerCol *hc)
      : AbstractNormProbe() {
   initialize_base();
   initialize(name, hc);
}

int FirmThresholdCostFnProbe::initialize_base() {
   VThresh = (float)0;
   VWidth = (float)0;
   return PV_SUCCESS;
}

FirmThresholdCostFnProbe::~FirmThresholdCostFnProbe() {}

int FirmThresholdCostFnProbe::initialize(const char *name, HyPerCol *hc) {
   return AbstractNormProbe::initialize(name, hc);
}

int FirmThresholdCostFnProbe::ioParamsFillGroup(enum ParamsIOFlag ioFlag) {
   int status = AbstractNormProbe::ioParamsFillGroup(ioFlag);
   ioParam_VThresh(ioFlag);
   ioParam_VWidth(ioFlag);
   return status;
}

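// VThresh is the threshold below which the firm-threshold transfer function
// returns 0 (|V| < VThresh gives zero activity); VWidth is the width of the
// interval over which the transfer function changes from hard to soft
// thresholding.  Both parameters are optional here: communicateInitInfo()
// below fills in missing values from the target layer when that layer is an
// ANNLayer.
//
// A params-file group using this probe might look like the following sketch
// (names and values are illustrative, not taken from this file):
//
//    FirmThresholdCostFnProbe "V1CostProbe" = {
//       targetLayer = "V1";
//       VThresh     = 0.025;
//       VWidth      = 0.0;
//    };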
void FirmThresholdCostFnProbe::ioParam_VThresh(enum ParamsIOFlag ioFlag) {
   parent->parameters()->ioParamValue(
         ioFlag, name, "VThresh", &VThresh, VThresh /*default*/, false /*warnIfAbsent*/);
}

void FirmThresholdCostFnProbe::ioParam_VWidth(enum ParamsIOFlag ioFlag) {
   parent->parameters()->ioParamValue(
         ioFlag, name, "VWidth", &VWidth, VWidth /*default*/, false /*warnIfAbsent*/);
}

int FirmThresholdCostFnProbe::setNormDescription() {
   return setNormDescriptionToString("Cost function");
}

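// If VThresh or VWidth was not given in params and the target layer is an
// ANNLayer, the missing value is taken from the target layer.  If the target
// layer is not an ANNLayer, the parameters are reread so that a warning is
// issued for any value that was not supplied.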
Response::Status FirmThresholdCostFnProbe::communicateInitInfo(
      std::shared_ptr<CommunicateInitInfoMessage const> message) {
   auto status = AbstractNormProbe::communicateInitInfo(message);
   if (!Response::completed(status)) {
      return status;
   }
   ANNLayer *targetANNLayer = dynamic_cast<ANNLayer *>(getTargetLayer());
   if (targetANNLayer != nullptr) {
      if (!parent->parameters()->present(getName(), "VThresh")) {
         VThresh = targetANNLayer->getVThresh();
      }
      if (!parent->parameters()->present(getName(), "VWidth")) {
         VWidth = targetANNLayer->getVWidth();
      }
   }
   else {
      // Reread VThresh and VWidth, this time warning if they are absent.
      parent->parameters()->ioParamValue(
            PARAMS_IO_READ, name, "VThresh", &VThresh, VThresh /*default*/, true /*warnIfAbsent*/);
      parent->parameters()->ioParamValue(
            PARAMS_IO_READ, name, "VWidth", &VWidth, VWidth /*default*/, true /*warnIfAbsent*/);
   }
   return Response::SUCCESS;
}

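// The firm-threshold cost of an activity value a = |V| is
//    a * (1 - a / (2 * (VThresh + VWidth)))   for a <  VThresh + VWidth,
//    (VThresh + VWidth) / 2                   for a >= VThresh + VWidth,
// i.e. the cost rises from zero, flattens quadratically, and saturates at
// amax = (VThresh + VWidth) / 2.  getValueInternal() sums this cost over the
// restricted neurons of the target layer for the given batch index.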
double FirmThresholdCostFnProbe::getValueInternal(double timevalue, int index) {
   if (index < 0 || index >= parent->getNBatch()) {
      return PV_FAILURE;
   }
   PVLayerLoc const *loc = getTargetLayer()->getLayerLoc();
   int const nx = loc->nx;
   int const ny = loc->ny;
   int const nf = loc->nf;
   PVHalo const *halo = &loc->halo;
   int const lt = halo->lt;
   int const rt = halo->rt;
   int const dn = halo->dn;
   int const up = halo->up;
   double sum = 0.0;
   double VThreshPlusVWidth = VThresh + VWidth;
   double amax = 0.5 * VThreshPlusVWidth;
   double a2 = 0.5 / VThreshPlusVWidth;
   float const *aBuffer =
         getTargetLayer()->getLayerData() + index * getTargetLayer()->getNumExtended();

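   // Three cases below: (1) a mask layer is attached, so only neurons whose
   // mask value is nonzero contribute to the sum; (2) no mask and the target
   // layer is sparse, so only the active indices are visited; (3) no mask and
   // a dense layer, so every restricted neuron is visited.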
   if (getMaskLayer()) {
      PVLayerLoc const *maskLoc = getMaskLayer()->getLayerLoc();
      PVHalo const *maskHalo = &maskLoc->halo;
      float const *maskLayerData =
            getMaskLayer()->getLayerData()
            + index * getMaskLayer()->getNumExtended(); // Is there a DataStore method to return the
                                                        // part of the layer data for a given batch
                                                        // index?
      int const maskLt = maskHalo->lt;
      int const maskRt = maskHalo->rt;
      int const maskDn = maskHalo->dn;
      int const maskUp = maskHalo->up;
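      // The mask either has a single feature (one value per (x,y) location,
      // gating all nf features of the target layer at that location) or the
      // same feature count as the target layer (one value per neuron).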
      if (maskHasSingleFeature()) {
         assert(getTargetLayer()->getNumNeurons() == nx * ny * nf);
         int nxy = nx * ny;
#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for reduction(+ : sum)
#endif // PV_USE_OPENMP_THREADS
         for (int kxy = 0; kxy < nxy; kxy++) {
            int kexMask = kIndexExtended(kxy, nx, ny, 1, maskLt, maskRt, maskDn, maskUp);
            if (maskLayerData[kexMask] == 0) {
               continue;
            }
            int featureBase = kxy * nf;
            for (int f = 0; f < nf; f++) {
               int kex = kIndexExtended(featureBase++, nx, ny, nf, lt, rt, dn, up);
               double a = (double)fabs(aBuffer[kex]);
               if (a >= VThreshPlusVWidth) {
                  sum += amax;
               }
               else {
                  sum += a * (1.0 - a2 * a);
               }
            }
         }
      }
      else {
#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for reduction(+ : sum)
#endif // PV_USE_OPENMP_THREADS
         for (int k = 0; k < getTargetLayer()->getNumNeurons(); k++) {
            int kex = kIndexExtended(k, nx, ny, nf, lt, rt, dn, up);
            double a = (double)fabs(aBuffer[kex]);
            if (a == 0) {
               continue;
            }
            int kexMask = kIndexExtended(k, nx, ny, nf, maskLt, maskRt, maskDn, maskUp);
            if (maskLayerData[kexMask] == 0) {
               continue;
            }
            if (a >= VThreshPlusVWidth) {
               sum += amax;
            }
            else {
               sum += a * (1.0 - a2 * a);
            }
         }
      }
   }
   else {
      if (getTargetLayer()->getSparseFlag()) {
         PVLayerCube cube = getTargetLayer()->getPublisher()->createCube();
         long int numActive = cube.numActive[index];
         int numItems = cube.numItems / cube.loc.nbatch;
         SparseList<float>::Entry const *activeList =
               &((SparseList<float>::Entry *)cube.activeIndices)[index * numItems];
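         // Sparse path: visit only the active (nonzero) entries for this batch
         // element.  Entries that fall in the border (halo) region get zero
         // activity via inRestricted below, so they add nothing to the sum.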
#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for reduction(+ : sum)
#endif // PV_USE_OPENMP_THREADS
         for (int k = 0; k < numActive; k++) {
            int extIndex = activeList[k].index;
            int inRestricted = !extendedIndexInBorderRegion(
                  extIndex, nx, ny, nf, halo->lt, halo->rt, halo->dn, halo->up);
            double a = inRestricted * (double)fabs(aBuffer[extIndex]);
            if (a >= VThreshPlusVWidth) {
               sum += amax;
            }
            else {
               sum += a * (1.0 - a2 * a);
            }
         }
      }
      else {
#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for reduction(+ : sum)
#endif // PV_USE_OPENMP_THREADS
         for (int k = 0; k < getTargetLayer()->getNumNeurons(); k++) {
            int kex = kIndexExtended(k, nx, ny, nf, lt, rt, dn, up);
            double a = (double)fabs(aBuffer[kex]);
            if (a == 0) {
               continue;
            }
            if (a >= VThreshPlusVWidth) {
               sum += amax;
            }
            else {
               sum += a * (1.0 - a2 * a);
            }
         }
      }
   }

   return sum;
}

} // end namespace PV