#include "ISTALayer.hpp"
#include "cudakernels/CudaUpdateStateFunctions.hpp"

// CPU update kernel; the definition is at the bottom of this file.
void ISTALayer_update_state(
      const int nbatch, const int numNeurons,
      const int nx, const int ny, const int nf,
      const int lt, const int rt, const int dn, const int up,
      const int numChannels,
      float *V, const float Vth, double *dtAdapt, const float tau,
      float *GSynHead, float *activity);
namespace PV {

ISTALayer::ISTALayer() { initialize_base(); }

ISTALayer::ISTALayer(const char *name, HyPerCol *hc) {
   initialize_base();
   initialize(name, hc);
}

ISTALayer::~ISTALayer() {}
int ISTALayer::initialize_base() {
   // Default values; overridden by the params file in the ioParam_* methods below.
   timeConstantTau = 1.0f;
   return PV_SUCCESS;
}
int ISTALayer::initialize(const char *name, HyPerCol *hc) {
   ANNLayer::initialize(name, hc);
   return PV_SUCCESS;
}

Response::Status ISTALayer::allocateDataStructures() {
   return ANNLayer::allocateDataStructures();
}
void ISTALayer::ioParam_timeConstantTau(enum ParamsIOFlag ioFlag) {
   parent->parameters()->ioParamValue(
         ioFlag, name, "timeConstantTau", &timeConstantTau, timeConstantTau, true /*warnIfAbsent*/);
}
void ISTALayer::ioParam_selfInteract(enum ParamsIOFlag ioFlag) {
   parent->parameters()->ioParamValue(ioFlag, name, "selfInteract", &selfInteract, selfInteract);
   if (parent->columnId() == 0) {
      InfoLog() << "selfInteract = " << selfInteract << std::endl;
   }
}
void ISTALayer::ioParam_adaptiveTimeScaleProbe(enum ParamsIOFlag ioFlag) {
   parent->parameters()->ioParamString(
         ioFlag,
         name,
         "adaptiveTimeScaleProbe",
         &mAdaptiveTimeScaleProbeName,
         nullptr /*default*/,
         true /*warnIfAbsent*/);
}
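// Illustrative sketch only: how the three parameters read above might appear in
// a PetaVision .params file. The layer name "V1" and the probe name "AdaptProbe"
// are made-up placeholders, and the values are examples, not defaults.
//
//    ISTALayer "V1" = {
//       timeConstantTau        = 100;
//       selfInteract           = true;
//       adaptiveTimeScaleProbe = "AdaptProbe";
//       // ... remaining ANNLayer/HyPerLayer parameters ...
//    };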
int ISTALayer::requireChannel(int channelNeeded, int *numChannelsResult) {
   int status = HyPerLayer::requireChannel(channelNeeded, numChannelsResult);
   if (channelNeeded >= 2 && parent->columnId() == 0) {
      WarnLog().printf(
            "ISTALayer \"%s\": connection on channel %d, but ISTA only uses channels 0 and 1.\n",
            name,
            channelNeeded);
   }
   return status;
}
int ISTALayer::allocateUpdateKernel() {
   PVCuda::CudaDevice *device = parent->getDevice();
   PVCuda::CudaUpdateISTALayer *updateKernel = new PVCuda::CudaUpdateISTALayer(device);

   // Gather the layer geometry and parameters that the CUDA kernel needs.
   const PVLayerLoc *loc = getLayerLoc();
   const int nx          = loc->nx;
   const int ny          = loc->ny;
   const int nf          = loc->nf;
   const int num_neurons = nx * ny * nf;
   const int nbatch      = loc->nbatch;
   const int lt          = loc->halo.lt;
   const int rt          = loc->halo.rt;
   const int dn          = loc->halo.dn;
   const int up          = loc->halo.up;
   const int numChannels = this->numChannels;

   PVCuda::CudaBuffer *d_V = getDeviceV();
   assert(d_V);
   const float Vth         = this->VThresh;
   const float AMax        = this->AMax;
   const float AMin        = this->AMin;
   const float AShift      = this->AShift;
   const float VWidth      = this->VWidth;
   const bool selfInteract = this->selfInteract;
   // Express the time constant in units of timesteps.
   const float tau = timeConstantTau / (float)parent->getDeltaTime();
   PVCuda::CudaBuffer *d_GSyn     = getDeviceGSyn();
   PVCuda::CudaBuffer *d_activity = getDeviceActivity();

   // One adaptive timestep per batch element; refreshed on the device each update.
   size_t size = parent->getNBatch() * sizeof(double);
   d_dtAdapt   = device->createBuffer(size, &description);

   // Hand the gathered values and device buffers to the CUDA kernel object.
   updateKernel->setArgs(
         nbatch, num_neurons, nx, ny, nf, lt, rt, dn, up, numChannels,
         d_V, Vth, d_dtAdapt, tau, d_GSyn, d_activity);
   krUpdate = updateKernel;
   return PV_SUCCESS;
}
Response::Status ISTALayer::updateStateGpu(double time, double dt) {
   if (triggerLayer != NULL) {
      Fatal().printf("HyPerLayer::Trigger reset of V does not work on GPUs\n");
   }
   // Push the current per-batch adaptive timesteps to the device.
   d_dtAdapt->copyToDevice(deltaTimes());

   PVCuda::CudaUpdateISTALayer *updateKernel =
         dynamic_cast<PVCuda::CudaUpdateISTALayer *>(krUpdate);
   assert(updateKernel);
   runUpdateKernel();
   return Response::SUCCESS;
}
Response::Status ISTALayer::updateState(double time, double dt) {
   const PVLayerLoc *loc = getLayerLoc();
   float *A              = clayer->activity->data;
   float *V              = getV();
   int num_channels      = getNumChannels();
   float *gSynHead       = GSyn == NULL ? NULL : GSyn[0];
   int nx                = loc->nx;
   int ny                = loc->ny;
   int nf                = loc->nf;
   int num_neurons       = nx * ny * nf;
   int nbatch            = loc->nbatch;

   // When a trigger layer fires, reset the membrane potential before updating.
   if (triggerLayer != NULL && triggerLayer->needUpdate(time, parent->getDeltaTime())) {
      for (int i = 0; i < num_neurons * nbatch; i++) {
         V[i] = 0.0f;
      }
   }

   ISTALayer_update_state(
         nbatch, num_neurons, nx, ny, nf,
         loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up, num_channels,
         V, VThresh, deltaTimes(), timeConstantTau / (float)dt, gSynHead, A);
   return Response::SUCCESS;
}
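// Note on tau: both the CPU call above (timeConstantTau / (float)dt) and the GPU
// setup in allocateUpdateKernel() (timeConstantTau / parent->getDeltaTime()) hand
// the kernel the time constant expressed in timesteps rather than in simulation time.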
// Returns one timestep per batch element: either the values provided by the
// AdaptiveTimeScaleProbe, or the column's base timestep if no probe was given.
double *ISTALayer::deltaTimes() {
   if (mAdaptiveTimeScaleProbe) {
      mAdaptiveTimeScaleProbe->getValues(parent->simulationTime(), &mDeltaTimes);
   }
   else {
      mDeltaTimes.assign(getLayerLoc()->nbatch, parent->getDeltaTime());
   }
   return mDeltaTimes.data();
}
} // namespace PV

// CPU kernel. Implementation sketch: the body assumes the shared element-wise
// updateV_ISTALayer() helper and simply forwards this function's arguments to it.
void ISTALayer_update_state(
      const int nbatch, const int numNeurons,
      const int nx, const int ny, const int nf,
      const int lt, const int rt, const int dn, const int up,
      const int numChannels,
      float *V, const float Vth, double *dtAdapt, const float tau,
      float *GSynHead, float *activity) {
   updateV_ISTALayer(
         nbatch, numNeurons, nx, ny, nf, lt, rt, dn, up, numChannels,
         V, Vth, dtAdapt, tau, GSynHead, activity);
}