ISTALayer.cpp
/*
 * ISTALayer.cpp
 *
 *  Created on: Jan 24, 2013
 *      Author: garkenyon
 */

#include "ISTALayer.hpp"
#include <iostream>

#ifdef PV_USE_CUDA

#include "cudakernels/CudaUpdateStateFunctions.hpp"

#endif

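// Forward declaration of the CPU update routine; the definition at the bottom
// of this file forwards its arguments to updateV_ISTALayer().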
void ISTALayer_update_state(
      const int nbatch,
      const int numNeurons,
      const int nx,
      const int ny,
      const int nf,
      const int lt,
      const int rt,
      const int dn,
      const int up,
      const int numChannels,

      float *V,
      const float Vth,
      double *dtAdapt,
      const float tau,
      float *GSynHead,
      float *activity);

namespace PV {

ISTALayer::ISTALayer() { initialize_base(); }

ISTALayer::ISTALayer(const char *name, HyPerCol *hc) {
   initialize_base();
   initialize(name, hc);
}

ISTALayer::~ISTALayer() {}

int ISTALayer::initialize_base() {
   numChannels = 1; // If a connection connects to this layer on inhibitory channel,
   // HyPerLayer::requireChannel will add necessary channel
   timeConstantTau = 1.0f;
   // Locality in conn
   selfInteract = true;
   return PV_SUCCESS;
}

int ISTALayer::initialize(const char *name, HyPerCol *hc) {
   ANNLayer::initialize(name, hc);
   return PV_SUCCESS;
}

Response::Status ISTALayer::allocateDataStructures() { return ANNLayer::allocateDataStructures(); }

int ISTALayer::ioParamsFillGroup(enum ParamsIOFlag ioFlag) {
   int status = ANNLayer::ioParamsFillGroup(ioFlag);
   ioParam_timeConstantTau(ioFlag);
   ioParam_adaptiveTimeScaleProbe(ioFlag);
   ioParam_selfInteract(ioFlag);
   return status;
}

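// timeConstantTau: the time constant of the membrane dynamics; it is divided
// by the timestep dt before being handed to the update kernel (see
// updateState() and allocateUpdateKernel()).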
void ISTALayer::ioParam_timeConstantTau(enum ParamsIOFlag ioFlag) {
   parent->parameters()->ioParamValue(
         ioFlag, name, "timeConstantTau", &timeConstantTau, timeConstantTau, true /*warnIfAbsent*/);
}

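// selfInteract: whether a neuron's own activity feeds back into its input in
// LCA-style dynamics. The value is read and logged here, but note that the
// ISTA update kernel below does not take it as an argument.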
void ISTALayer::ioParam_selfInteract(enum ParamsIOFlag ioFlag) {
   parent->parameters()->ioParamValue(ioFlag, name, "selfInteract", &selfInteract, selfInteract);
   if (parent->columnId() == 0) {
      InfoLog() << "selfInteract = " << selfInteract << std::endl;
   }
}

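// adaptiveTimeScaleProbe: name of an AdaptiveTimeScaleProbe that supplies
// per-batch adaptive timesteps; when set, deltaTimes() queries the probe
// instead of using the HyPerCol's dt.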
void ISTALayer::ioParam_adaptiveTimeScaleProbe(enum ParamsIOFlag ioFlag) {
   parent->parameters()->ioParamString(
         ioFlag,
         name,
         "adaptiveTimeScaleProbe",
         &mAdaptiveTimeScaleProbeName,
         nullptr /*default*/,
         true /*warn if absent*/);
}

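// A hypothetical params-file entry illustrating the parameters read above
// (the layer name "V1" and probe name "AdaptProbe" are invented for the
// example; the remaining layer parameters come from ANNLayer/HyPerLayer):
//
//   ISTALayer "V1" = {
//      timeConstantTau        = 100;
//      selfInteract           = true;
//      adaptiveTimeScaleProbe = "AdaptProbe";
//      // plus the usual ANNLayer/HyPerLayer parameters (nxScale, nf, VThresh, ...)
//   };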
int ISTALayer::requireChannel(int channelNeeded, int *numChannelsResult) {
   int status = HyPerLayer::requireChannel(channelNeeded, numChannelsResult);
   if (channelNeeded >= 2 && parent->columnId() == 0) {
      WarnLog().printf(
            "ISTALayer \"%s\": connection on channel %d, but ISTA only uses channels 0 and 1.\n",
            name,
            channelNeeded);
   }
   return status;
}

#ifdef PV_USE_CUDA
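// Build the CUDA update kernel and bind the arguments that stay fixed across
// timesteps; the per-batch timestep buffer (d_dtAdapt) is refreshed in
// updateStateGpu() before each launch.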
int ISTALayer::allocateUpdateKernel() {
   PVCuda::CudaDevice *device = parent->getDevice();
   // Set to temp pointer of the subclass
   PVCuda::CudaUpdateISTALayer *updateKernel = new PVCuda::CudaUpdateISTALayer(device);
   // Set arguments
   const PVLayerLoc *loc = getLayerLoc();
   const int nx = loc->nx;
   const int ny = loc->ny;
   const int nf = loc->nf;
   const int num_neurons = nx * ny * nf;
   const int nbatch = loc->nbatch;
   const int lt = loc->halo.lt;
   const int rt = loc->halo.rt;
   const int dn = loc->halo.dn;
   const int up = loc->halo.up;
   const int numChannels = this->numChannels;
   PVCuda::CudaBuffer *d_V = getDeviceV();
   assert(d_V);
   const float Vth = this->VThresh;
   const float AMax = this->AMax;
   const float AMin = this->AMin;
   const float AShift = this->AShift;
   const float VWidth = this->VWidth;
   const bool selfInteract = this->selfInteract;
   const float tau = timeConstantTau
                     / (float)parent->getDeltaTime(); // TODO: eliminate need to call parent method
   PVCuda::CudaBuffer *d_GSyn = getDeviceGSyn();
   PVCuda::CudaBuffer *d_activity = getDeviceActivity();

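   // Device-side buffer holding one (double-precision) timestep per batch
   // element; it is filled from deltaTimes() at the start of each GPU update.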
   size_t size = parent->getNBatch() * sizeof(double);
   d_dtAdapt = device->createBuffer(size, &description);

   // Set arguments to kernel
   updateKernel->setArgs(
         nbatch,
         num_neurons,
         nx,
         ny,
         nf,
         lt,
         rt,
         dn,
         up,
         numChannels,
         d_V,
         Vth,
         d_dtAdapt,
         tau,
         d_GSyn,
         d_activity);

   krUpdate = updateKernel;
   return PV_SUCCESS;
}

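// GPU path: copy the current per-batch timesteps to the device, then launch
// the ISTA update kernel. Trigger-driven resets of V are not supported on the
// GPU and abort with a fatal error.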
Response::Status ISTALayer::updateStateGpu(double time, double dt) {
   if (triggerLayer != NULL) {
      Fatal().printf("HyPerLayer::Trigger reset of V does not work on GPUs\n");
   }
   // Copy over d_dtAdapt
   d_dtAdapt->copyToDevice(deltaTimes());
   // Change dt to match what is passed in
   PVCuda::CudaUpdateISTALayer *updateKernel =
         dynamic_cast<PVCuda::CudaUpdateISTALayer *>(krUpdate);
   assert(updateKernel);
   runUpdateKernel();
   return Response::SUCCESS;
}
#endif

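// The layer updates on every HyPerCol timestep.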
double ISTALayer::getDeltaUpdateTime() { return parent->getDeltaTime(); }

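// CPU path: zero the membrane potentials when a trigger layer fires, then run
// the ISTA update over all neurons and batch elements on the host.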
Response::Status ISTALayer::updateState(double time, double dt) {
   const PVLayerLoc *loc = getLayerLoc();
   float *A = clayer->activity->data;
   float *V = getV();
   int num_channels = getNumChannels();
   float *gSynHead = GSyn == NULL ? NULL : GSyn[0];
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int num_neurons = nx * ny * nf;
   int nbatch = loc->nbatch;
   // Only update when the probe updates

   if (triggerLayer != NULL && triggerLayer->needUpdate(time, parent->getDeltaTime())) {
      for (int i = 0; i < num_neurons * nbatch; i++) {
         V[i] = 0.0;
      }
   }

   ISTALayer_update_state(
         nbatch,
         num_neurons,
         nx,
         ny,
         nf,
         loc->halo.lt,
         loc->halo.rt,
         loc->halo.dn,
         loc->halo.up,
         numChannels,
         V,
         VThresh,
         deltaTimes(),
         timeConstantTau / (float)dt,
         gSynHead,
         A);
   return Response::SUCCESS;
}

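// Returns a per-batch array of timesteps: the values reported by the
// AdaptiveTimeScaleProbe when one was specified, otherwise the HyPerCol's dt
// for every batch element.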
double *ISTALayer::deltaTimes() {
   if (mAdaptiveTimeScaleProbe) {
      mAdaptiveTimeScaleProbe->getValues(parent->simulationTime(), &mDeltaTimes);
   }
   else {
      mDeltaTimes.assign(getLayerLoc()->nbatch, parent->getDeltaTime());
   }
   return mDeltaTimes.data();
}

} /* namespace PV */

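// CPU implementation of the update kernel: a thin wrapper that forwards its
// arguments to updateV_ISTALayer().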
void ISTALayer_update_state(
      const int nbatch,
      const int numNeurons,
      const int nx,
      const int ny,
      const int nf,
      const int lt,
      const int rt,
      const int dn,
      const int up,
      const int numChannels,

      float *V,
      const float Vth,
      double *dtAdapt,
      const float tau,
      float *GSynHead,
      float *activity) {
   updateV_ISTALayer(
         nbatch,
         numNeurons,
         V,
         GSynHead,
         activity,
         Vth,
         dtAdapt,
         tau,
         nx,
         ny,
         nf,
         lt,
         rt,
         dn,
         up,
         numChannels);
}