// NOTE(review): This file appears to be a lossy extraction of MaskLayer.cpp
// (OpenPV/PetaVision-style layer code). The numbers fused into the text are
// the original file's line numbers, and many statements (bodies, closing
// braces, error-exit calls) were dropped by the extraction. Comments below
// describe only what is visible; truncated regions are flagged.

// Public constructor: takes the layer's name and its parent HyPerCol.
// Presumably performs the framework's two-stage init (initialize_base()
// then initialize(name, hc)) — body not visible here; confirm against the
// full source.
9 #include "MaskLayer.hpp" 13 MaskLayer::MaskLayer(
const char *name, HyPerCol *hc) {
// Default constructor — conventionally calls only initialize_base(),
// leaving initialize() to a derived class; body truncated in this
// extraction, so this is unconfirmed.
18 MaskLayer::MaskLayer() {
// Destructor — presumably frees the strings/arrays read from params
// (maskMethod, maskLayerName, features); body not visible here.
23 MaskLayer::~MaskLayer() {
// initialize_base: sets member defaults prior to param reading
// (maskMethod/maskLayerName/features presumably nulled) — body truncated;
// verify against the full source.
35 int MaskLayer::initialize_base() {
// Interior of ioParamsFillGroup (enclosing signature outside this view).
// Order matters: maskMethod is read first because both
// ioParam_maskLayerName and ioParam_featureIdxs assert it has already been
// read (see the presentAndNotBeenRead asserts below).
46 ioParam_maskMethod(ioFlag);
47 ioParam_maskLayerName(ioFlag);
48 ioParam_featureIdxs(ioFlag);
// ioParam_maskMethod: reads the required string parameter "maskMethod" and
// validates it against the four accepted values: "layer", "invertLayer",
// "maskFeatures", "noMaskFeatures". On an unrecognized value, the root
// process (columnId() == 0) reports the error; the fatal-exit that
// presumably follows is truncated in this extraction.
52 void MaskLayer::ioParam_maskMethod(
enum ParamsIOFlag ioFlag) {
53 parent->parameters()->ioParamStringRequired(ioFlag, name,
"maskMethod", &maskMethod);
// Accept each valid method; the per-branch bodies (if any) are not
// visible here.
55 if (strcmp(maskMethod,
"layer") == 0) {
57 else if (strcmp(maskMethod,
"invertLayer") == 0) {
59 else if (strcmp(maskMethod,
"maskFeatures") == 0) {
61 else if (strcmp(maskMethod,
"noMaskFeatures") == 0) {
// Fall-through: invalid maskMethod — only rank 0 prints, to avoid
// duplicated output across MPI processes.
64 if (parent->columnId() == 0) {
66 "%s: \"%s\" is not a valid maskMethod. Options are \"layer\", \"invertLayer\", " 67 "\"maskFeatures\", or \"noMaskFeatures\".\n",
// ioParam_maskLayerName: reads the required string "maskLayerName", but
// only when maskMethod is "layer" or "invertLayer" (the two methods that
// mask against another layer's activity). The assert guarantees maskMethod
// was read first by ioParamsFillGroup.
75 void MaskLayer::ioParam_maskLayerName(
enum ParamsIOFlag ioFlag) {
76 assert(!parent->parameters()->presentAndNotBeenRead(name,
"maskMethod"));
77 if (strcmp(maskMethod,
"layer") == 0 || strcmp(maskMethod,
"invertLayer") == 0) {
78 parent->parameters()->ioParamStringRequired(ioFlag, name,
"maskLayerName", &maskLayerName);
// ioParam_featureIdxs: reads the integer array "featureIdxs" into
// features/numSpecifiedFeatures, but only for the feature-based methods
// ("maskFeatures" / "noMaskFeatures"). Requires at least one feature; the
// error path (rank-0 report, then presumably a fatal exit) is truncated in
// this extraction.
82 void MaskLayer::ioParam_featureIdxs(
enum ParamsIOFlag ioFlag) {
83 assert(!parent->parameters()->presentAndNotBeenRead(name,
"maskMethod"));
84 if (strcmp(maskMethod,
"maskFeatures") == 0 || strcmp(maskMethod,
"noMaskFeatures") == 0) {
85 parent->parameters()->ioParamArray(
86 ioFlag, name,
"featureIdxs", &features, &numSpecifiedFeatures);
// An empty feature list would mask nothing (or everything, for the
// inverted method) — treat as a configuration error.
87 if (numSpecifiedFeatures == 0) {
88 if (parent->columnId() == 0) {
90 "%s: MaskLayer must specify at least one feature for maskMethod \"%s\".\n",
// communicateInitInfo: after the base-class handshake, resolves and
// validates the configuration that depends on other layers.
//  - For "layer"/"invertLayer": looks up maskLayerName in the HyPerCol and
//    requires the mask layer to match this layer's global x/y dimensions,
//    and to have either 1 feature or the same feature count.
//  - For the feature-based methods: checks every requested feature index
//    is within [0, nf).
// Error paths print on rank 0 and hit an MPI_Barrier; the exit calls that
// presumably follow each barrier are truncated in this extraction.
100 MaskLayer::communicateInitInfo(std::shared_ptr<CommunicateInitInfoMessage const> message) {
101 auto status = ANNLayer::communicateInitInfo(message);
105 if (strcmp(maskMethod,
"layer") == 0 || strcmp(maskMethod,
"invertLayer") == 0) {
// Resolve the mask layer by name via the CommunicateInitInfo message.
106 maskLayer = message->lookup<
HyPerLayer>(std::string(maskLayerName));
107 if (maskLayer == NULL) {
108 if (parent->columnId() == 0) {
110 "%s: maskLayerName \"%s\" is not a layer in the HyPerCol.\n",
// Barrier keeps non-root ranks from racing past the rank-0 report.
114 MPI_Barrier(parent->getCommunicator()->communicator());
118 const PVLayerLoc *maskLoc = maskLayer->getLayerLoc();
120 assert(maskLoc != NULL && loc != NULL);
// Mask must cover the same global x/y extent as this layer.
121 if (maskLoc->nxGlobal != loc->nxGlobal || maskLoc->nyGlobal != loc->nyGlobal) {
122 if (parent->columnId() == 0) {
123 ErrorLog(errorMessage);
125 "%s: maskLayerName \"%s\" does not have the same x and y dimensions.\n",
129 " original (nx=%d, ny=%d, nf=%d) versus (nx=%d, ny=%d, nf=%d)\n",
137 MPI_Barrier(parent->getCommunicator()->communicator());
// A single-feature mask is broadcast across features; otherwise the
// feature counts must match exactly.
141 if (maskLoc->nf != 1 && maskLoc->nf != loc->nf) {
142 if (parent->columnId() == 0) {
143 ErrorLog(errorMessage);
145 "%s: maskLayerName \"%s\" must either have the same number of features as this " 146 "layer, or one feature.\n",
150 " original (nx=%d, ny=%d, nf=%d) versus (nx=%d, ny=%d, nf=%d)\n",
158 MPI_Barrier(parent->getCommunicator()->communicator());
// Global sizes matched above, so local restricted sizes must too.
162 assert(maskLoc->nx == loc->nx && maskLoc->ny == loc->ny);
// Feature-based methods: validate each requested feature index.
168 for (
int f = 0; f < numSpecifiedFeatures; f++) {
169 if (features[f] < 0 || features[f] >= loc->nf) {
170 Fatal() <<
"Specified feature " << features[f] <<
"out of bounds\n";
175 return Response::SUCCESS;
// updateState: lets ANNLayer compute this layer's activity normally, then
// zeroes the activity of masked neurons (ABatch[kThisExt] = 0) according
// to maskMethod. The method string is translated to an int once per call
// so the per-neuron switch avoids repeated strcmp. The per-case logic is
// only partially visible in this extraction:
//  - METHOD_LAYER / METHOD_INVERT_LAYER read the mask layer's extended
//    activity; a 1-feature mask appears to be broadcast across features.
//  - METHOD_FEATURES / METHOD_INVERT_FEATURES compare each neuron's
//    feature index against the configured features[] list.
// The exact mask/keep polarity per case cannot be confirmed from the
// surviving lines — verify against the full source.
178 Response::Status MaskLayer::updateState(
double time,
double dt) {
179 ANNLayer::updateState(time, dt);
181 float *A = getCLayer()->activity->data;
183 int num_channels = getNumChannels();
184 float *gSynHead = GSyn == NULL ? NULL : GSyn[0];
190 int num_neurons = nx * ny * nf;
191 int nbatch = loc->nbatch;
// Local codes for the four maskMethod strings (see ioParam_maskMethod).
194 const int METHOD_LAYER = 0;
195 const int METHOD_INVERT_LAYER = 1;
196 const int METHOD_FEATURES = 2;
197 const int METHOD_INVERT_FEATURES = 3;
199 if (strcmp(maskMethod,
"layer") == 0) {
200 method = METHOD_LAYER;
202 else if (strcmp(maskMethod,
"invertLayer") == 0) {
203 method = METHOD_INVERT_LAYER;
205 else if (strcmp(maskMethod,
"maskFeatures") == 0) {
206 method = METHOD_FEATURES;
208 else if (strcmp(maskMethod,
"noMaskFeatures") == 0) {
209 method = METHOD_INVERT_FEATURES;
// Apply the mask per batch element; neurons are processed in parallel
// under OpenMP when enabled.
212 for (
int b = 0; b < nbatch; b++) {
213 float *ABatch = A + b * getNumExtended();
214 #ifdef PV_USE_OPENMP_THREADS 215 #pragma omp parallel for 217 for (
int ni = 0; ni < num_neurons; ni++) {
// Convert restricted index to extended (halo-padded) index.
219 int kThisExt = kIndexExtended(
220 ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
// --- METHOD_LAYER (case label truncated): mask value comes from the
// mask layer's activity at the corresponding extended location.
225 const PVLayerLoc *maskLoc = maskLayer->getLayerLoc();
226 float *maskActivity = maskLayer->getActivity();
227 float *maskActivityBatch = maskActivity + b * maskLayer->getNumExtended();
// 1-feature mask: index computation (truncated) presumably drops the
// feature coordinate so one mask value covers all features.
229 if (maskLoc->nf == 1) {
235 int kMaskExt = kIndexExtended(
244 maskVal = maskActivityBatch[kMaskExt];
246 case METHOD_INVERT_LAYER: {
247 const PVLayerLoc *maskLoc = maskLayer->getLayerLoc();
248 float *maskActivity = maskLayer->getActivity();
249 float *maskActivityBatch = maskActivity + b * maskLayer->getNumExtended();
251 if (maskLoc->nf == 1) {
257 int kMaskExt = kIndexExtended(
// Inverted polarity: a nonzero mask activity triggers the masked
// branch here (truncated).
266 if (maskActivityBatch[kMaskExt]) {
270 case METHOD_FEATURES: {
// Mask neurons whose feature index is in the configured list.
272 int featureNum = featureIndex(ni, nx, ny, nf);
274 for (
int specF = 0; specF < numSpecifiedFeatures; specF++) {
275 if (featureNum == features[specF]) {
281 case METHOD_INVERT_FEATURES: {
// Inverted: listed features are the ones kept (branch bodies truncated).
283 int featureNum = featureIndex(ni, nx, ny, nf);
285 for (
int specF = 0; specF < numSpecifiedFeatures; specF++) {
286 if (featureNum == features[specF]) {
// Masked neurons have their activity forced to zero.
297 ABatch[kThisExt] = 0;
301 return Response::SUCCESS;
// NOTE(review): The three declarations below look like stray fragments
// spilled in from a header (MaskLayer.hpp or a Response/Status header) by
// the same extraction that mangled this file — they are not definitions in
// this translation unit and lack terminating semicolons. They should be
// removed or restored to their proper header when the file is
// reconstructed.
static bool completed(Status &a)
virtual int ioParamsFillGroup(enum ParamsIOFlag ioFlag) override
virtual int ioParamsFillGroup(enum ParamsIOFlag ioFlag) override