PetaVision Alpha
HyPerLCALayer.cpp
/*
 * HyPerLCALayer.cpp
 *
 * Created on: Jan 24, 2013
 * Author: garkenyon
 */

#include "HyPerLCALayer.hpp"
#include <iostream>

#ifdef PV_USE_CUDA

#include "cudakernels/CudaUpdateStateFunctions.hpp"

#endif

void HyPerLCALayer_update_state(
      const int nbatch,
      const int numNeurons,
      const int nx,
      const int ny,
      const int nf,
      const int lt,
      const int rt,
      const int dn,
      const int up,
      const int numChannels,
      float *V,
      int numVertices,
      float *verticesV,
      float *verticesA,
      float *slopes,
      const bool selfInteract,
      double *dtAdapt,
      const float tau,
      float *GSynHead,
      float *activity);

namespace PV {

HyPerLCALayer::HyPerLCALayer() { initialize_base(); }

HyPerLCALayer::HyPerLCALayer(const char *name, HyPerCol *hc) {
   initialize_base();
   initialize(name, hc);
}

HyPerLCALayer::~HyPerLCALayer() { free(mAdaptiveTimeScaleProbeName); }

int HyPerLCALayer::initialize_base() {
   numChannels = 1; // If a connection connects to this layer on the inhibitory channel,
   // HyPerLayer::requireChannel will add the necessary channel
   timeConstantTau = 1.0;
   selfInteract    = true;
   return PV_SUCCESS;
}

int HyPerLCALayer::initialize(const char *name, HyPerCol *hc) {
   ANNLayer::initialize(name, hc);
   return PV_SUCCESS;
}

Response::Status HyPerLCALayer::allocateDataStructures() {
   auto status = ANNLayer::allocateDataStructures(); // Calls allocateUpdateKernel()
   if (!Response::completed(status)) {
      return status;
   }
   pvAssert(
         mAdaptiveTimeScaleProbe == nullptr
         || getLayerLoc()->nbatch == mAdaptiveTimeScaleProbe->getNumValues());
   mDeltaTimes.resize(getLayerLoc()->nbatch);
   return Response::SUCCESS;
}

int HyPerLCALayer::ioParamsFillGroup(enum ParamsIOFlag ioFlag) {
   int status = ANNLayer::ioParamsFillGroup(ioFlag);
   ioParam_timeConstantTau(ioFlag);
   ioParam_selfInteract(ioFlag);
   ioParam_adaptiveTimeScaleProbe(ioFlag);
   return status;
}

void HyPerLCALayer::ioParam_timeConstantTau(enum ParamsIOFlag ioFlag) {
   parent->parameters()->ioParamValue(
         ioFlag, name, "timeConstantTau", &timeConstantTau, timeConstantTau, true /*warnIfAbsent*/);
}
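
// Per the parameter documentation, timeConstantTau is the time constant tau in the LCA
// dynamics dV/dt = (1/tau) * (...). Assuming the standard LCA membrane equation, this
// reads tau * dV/dt = -V + GSyn + s * A, where s is the selfInteract coefficient read
// below; the exact form used here is whatever updateV_HyPerLCALayer implements.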

void HyPerLCALayer::ioParam_selfInteract(enum ParamsIOFlag ioFlag) {
   parent->parameters()->ioParamValue(ioFlag, name, "selfInteract", &selfInteract, selfInteract);
   if (ioFlag == PARAMS_IO_READ && parent->columnId() == 0) {
      InfoLog() << getDescription() << ": selfInteract flag is "
                << (selfInteract ? "true" : "false") << std::endl;
   }
}

void HyPerLCALayer::ioParam_adaptiveTimeScaleProbe(enum ParamsIOFlag ioFlag) {
   parent->parameters()->ioParamString(
         ioFlag,
         name,
         "adaptiveTimeScaleProbe",
         &mAdaptiveTimeScaleProbeName,
         nullptr /*default*/,
         true /*warn if absent*/);
}
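
// A minimal sketch of how these three parameters might appear in a params file (the
// layer name "V1" and probe name "AdaptProbe" are hypothetical; all other layer
// parameters are elided):
//
//   HyPerLCALayer "V1" = {
//      timeConstantTau        = 100.0;
//      selfInteract           = true;
//      adaptiveTimeScaleProbe = "AdaptProbe"; // name of an AdaptiveTimeScaleProbe group
//      // ... remaining HyPerLayer / ANNLayer parameters ...
//   };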

int HyPerLCALayer::requireChannel(int channelNeeded, int *numChannelsResult) {
   int status = HyPerLayer::requireChannel(channelNeeded, numChannelsResult);
   if (channelNeeded >= 2 && parent->columnId() == 0) {
      WarnLog().printf(
            "HyPerLCALayer \"%s\": connection on channel %d, but HyPerLCA only uses channels 0 and "
            "1.\n",
            name,
            channelNeeded);
   }
   return status;
}

Response::Status
HyPerLCALayer::communicateInitInfo(std::shared_ptr<CommunicateInitInfoMessage const> message) {
   if (mAdaptiveTimeScaleProbeName) {
      mAdaptiveTimeScaleProbe =
            message->lookup<AdaptiveTimeScaleProbe>(std::string(mAdaptiveTimeScaleProbeName));
      if (mAdaptiveTimeScaleProbe == nullptr) {
         if (parent->getCommunicator()->commRank() == 0) {
            auto isBadType = message->lookup<BaseObject>(std::string(mAdaptiveTimeScaleProbeName));
            if (isBadType != nullptr) {
               ErrorLog() << description << ": adaptiveTimeScaleProbe parameter \""
                          << mAdaptiveTimeScaleProbeName
                          << "\" must be an AdaptiveTimeScaleProbe.\n";
            }
            else {
               ErrorLog() << description << ": adaptiveTimeScaleProbe parameter \""
                          << mAdaptiveTimeScaleProbeName
                          << "\" is not an AdaptiveTimeScaleProbe in the column.\n";
            }
         }
         MPI_Barrier(parent->getCommunicator()->communicator());
         exit(EXIT_FAILURE);
      }
   }
   auto status = ANNLayer::communicateInitInfo(message);
   if (!Response::completed(status)) {
      return status;
   }
   return Response::SUCCESS;
}

#ifdef PV_USE_CUDA
int HyPerLCALayer::allocateUpdateKernel() {
   PVCuda::CudaDevice *device = parent->getDevice();
   // Set to temp pointer of the subclass
   PVCuda::CudaUpdateHyPerLCALayer *updateKernel = new PVCuda::CudaUpdateHyPerLCALayer(device);
   // Set arguments
   const PVLayerLoc *loc = getLayerLoc();
   const int nx          = loc->nx;
   const int ny          = loc->ny;
   const int nf          = loc->nf;
   const int num_neurons = nx * ny * nf;
   const int nbatch      = loc->nbatch;
   const int lt          = loc->halo.lt;
   const int rt          = loc->halo.rt;
   const int dn          = loc->halo.dn;
   const int up          = loc->halo.up;
   const int numChannels = this->numChannels;
   PVCuda::CudaBuffer *d_V = getDeviceV();
   assert(d_V);
   const float Vth         = this->VThresh;
   const float AMax        = this->AMax;
   const float AMin        = this->AMin;
   const float AShift      = this->AShift;
   const float VWidth      = this->VWidth;
   const bool selfInteract = this->selfInteract;
   const float tau         = timeConstantTau
                     / (float)parent->getDeltaTime(); // TODO: eliminate need to call parent method
   PVCuda::CudaBuffer *d_GSyn     = getDeviceGSyn();
   PVCuda::CudaBuffer *d_activity = getDeviceActivity();

   size_t size = parent->getNBatch() * sizeof(double);
   d_dtAdapt   = device->createBuffer(size, &description);

   size        = (size_t)numVertices * sizeof(*verticesV);
   d_verticesV = device->createBuffer(size, &description);
   d_verticesA = device->createBuffer(size, &description);
   d_slopes    = device->createBuffer(size + sizeof(*slopes), &description);

   d_verticesV->copyToDevice(verticesV);
   d_verticesA->copyToDevice(verticesA);
   d_slopes->copyToDevice(slopes);

   // Set arguments to kernel
   updateKernel->setArgs(
         nbatch,
         num_neurons,
         nx,
         ny,
         nf,
         lt,
         rt,
         dn,
         up,
         numChannels,
         d_V,
         numVertices,
         d_verticesV,
         d_verticesA,
         d_slopes,
         selfInteract,
         d_dtAdapt,
         tau,
         d_GSyn,
         d_activity);

   // Update d_V for V initialization

   // set updateKernel to krUpdate
   krUpdate = updateKernel;
   return PV_SUCCESS;
}
#endif

#ifdef PV_USE_CUDA
Response::Status HyPerLCALayer::updateStateGpu(double time, double dt) {
   // Copy over d_dtAdapt
   d_dtAdapt->copyToDevice(deltaTimes());
   // Change dt to match what is passed in
   PVCuda::CudaUpdateHyPerLCALayer *updateKernel =
         dynamic_cast<PVCuda::CudaUpdateHyPerLCALayer *>(krUpdate);
   assert(updateKernel);
   runUpdateKernel();
   return Response::SUCCESS;
}
#endif

double HyPerLCALayer::getDeltaUpdateTime() { return parent->getDeltaTime(); }

Response::Status HyPerLCALayer::updateState(double time, double dt) {
   const PVLayerLoc *loc = getLayerLoc();
   float *A              = clayer->activity->data;
   float *V              = getV();
   int num_channels      = getNumChannels();
   float *gSynHead       = GSyn == NULL ? NULL : GSyn[0];
   {
      int nx          = loc->nx;
      int ny          = loc->ny;
      int nf          = loc->nf;
      int num_neurons = nx * ny * nf;
      int nbatch      = loc->nbatch;
      // Only update when the probe updates

      HyPerLCALayer_update_state(
            nbatch,
            num_neurons,
            nx,
            ny,
            nf,
            loc->halo.lt,
            loc->halo.rt,
            loc->halo.dn,
            loc->halo.up,
            numChannels,
            V,
            numVertices,
            verticesV,
            verticesA,
            slopes,
            selfInteract,
            deltaTimes(),
            timeConstantTau / (float)dt,
            gSynHead,
            A);
   }

   return Response::SUCCESS;
}

double *HyPerLCALayer::deltaTimes() {
   if (mAdaptiveTimeScaleProbe) {
      mAdaptiveTimeScaleProbe->getValues(parent->simulationTime(), &mDeltaTimes);
   }
   else {
      mDeltaTimes.assign(getLayerLoc()->nbatch, parent->getDeltaTime());
   }
   return mDeltaTimes.data();
}

} /* namespace PV */

void HyPerLCALayer_update_state(
      const int nbatch,
      const int numNeurons,
      const int nx,
      const int ny,
      const int nf,
      const int lt,
      const int rt,
      const int dn,
      const int up,
      const int numChannels,
      float *V,
      int numVertices,
      float *verticesV,
      float *verticesA,
      float *slopes,
      const bool selfInteract,
      double *dtAdapt,
      const float tau,
      float *GSynHead,
      float *activity) {
   updateV_HyPerLCALayer(
         nbatch,
         numNeurons,
         numChannels,
         V,
         GSynHead,
         activity,
         numVertices,
         verticesV,
         verticesA,
         slopes,
         dtAdapt,
         tau,
         selfInteract,
         nx,
         ny,
         nf,
         lt,
         rt,
         dn,
         up);
}
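
// For reference, a minimal sketch (illustrative only, not the library kernel) of the
// per-neuron leaky-integrator step that updateV_HyPerLCALayer performs, assuming the
// standard LCA dynamics tau * dV/dt = -V + GSyn + s * A (Rozell et al., 2008), where
// s is the selfInteract coefficient. How dtAdapt[b] and the tau argument combine into
// the per-step gain is left to the actual kernel; a single hypothetical dtOverTau gain
// stands in for it here, and the recomputation of activity from V via the (verticesV,
// verticesA, slopes) transfer function is omitted.
static void lcaEulerStepSketch(
      int numNeurons,
      float *V,          // membrane potentials, updated in place
      const float *gSyn, // net synaptic drive for one batch element
      const float *A,    // activity from the previous timestep
      bool selfInteract,
      float dtOverTau) { // per-step gain for this batch element
   for (int k = 0; k < numNeurons; ++k) {
      float drive = gSyn[k] + (selfInteract ? A[k] : 0.0f);
      V[k] += dtOverTau * (drive - V[k]); // forward Euler on tau*dV/dt = -V + drive
   }
}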