// Includes followed by the forward declaration of the CPU update-state
// entry point (defined near the bottom of the file).
// NOTE(review): this excerpt is extraction-garbled — the stray integers
// (8, 13, 17, ...) are original source line numbers fused into the text,
// and most of the declaration's parameter list is missing from this view.
8 #include "HyPerLCALayer.hpp" 13 #include "cudakernels/CudaUpdateStateFunctions.hpp" 17 void HyPerLCALayer_update_state(
27 const int numChannels,
33 const bool selfInteract,
41 HyPerLCALayer::HyPerLCALayer() { initialize_base(); }
// Constructor taking the layer's name and the parent HyPerCol.
// NOTE(review): the body (original lines 44-46) is missing from this view —
// presumably initialize_base() followed by initialize(name, hc); confirm
// against the repository.
43 HyPerLCALayer::HyPerLCALayer(
const char *name, HyPerCol *hc) {
48 HyPerLCALayer::~HyPerLCALayer() { free(mAdaptiveTimeScaleProbeName); }
// Sets member defaults before params are read. Only the timeConstantTau
// default (1.0) is visible here; the remaining defaults and the return
// statement (original lines 51-52, 54-56) are missing from this view.
50 int HyPerLCALayer::initialize_base() {
53 timeConstantTau = 1.0;
// Two-phase initialization: delegates to ANNLayer::initialize with the same
// name/column arguments. The tail of this method (original lines 60-62) is
// missing from this view.
58 int HyPerLCALayer::initialize(
const char *name, HyPerCol *hc) {
59 ANNLayer::initialize(name, hc);
// Allocates layer data after the base class has allocated its own.
// The visible condition tail (original lines 69-70) checks that either no
// adaptiveTimeScaleProbe is configured or the probe supplies exactly one
// value per batch element; its enclosing assert/if wrapper (original lines
// 65-68) is missing from this view — confirm against the repository.
// mDeltaTimes is sized to hold one timestep per batch element, for use by
// deltaTimes().
63 Response::Status HyPerLCALayer::allocateDataStructures() {
64 auto status = ANNLayer::allocateDataStructures();
69 mAdaptiveTimeScaleProbe ==
nullptr 70 || getLayerLoc()->nbatch == mAdaptiveTimeScaleProbe->
getNumValues());
71 mDeltaTimes.resize(getLayerLoc()->nbatch);
72 return Response::SUCCESS;
// Reads/writes the "timeConstantTau" parameter (the LCA time constant tau),
// defaulting to the current member value so re-reads are idempotent.
// NOTE(review): the trailing `true` is presumably the warn-if-absent flag of
// ioParamValue — confirm against that method's signature.
84 parent->parameters()->ioParamValue(
85 ioFlag, name,
"timeConstantTau", &timeConstantTau, timeConstantTau,
true );
// Reads/writes the "selfInteract" flag, then — on PARAMS_IO_READ, rank 0
// only — logs the resulting value. The closing braces of this method
// (original lines 93-95) are missing from this view.
89 parent->parameters()->ioParamValue(ioFlag, name,
"selfInteract", &selfInteract, selfInteract);
90 if (ioFlag == PARAMS_IO_READ && parent->columnId() == 0) {
91 InfoLog() << getDescription() <<
": selfInteract flag is " 92 << (selfInteract ?
"true" :
"false") << std::endl;
// Reads/writes the optional "adaptiveTimeScaleProbe" string parameter into
// mAdaptiveTimeScaleProbeName (freed in the destructor). The call's
// remaining arguments and closing lines (original 98-99, 102-104) are
// missing from this view.
97 parent->parameters()->ioParamString(
100 "adaptiveTimeScaleProbe",
101 &mAdaptiveTimeScaleProbeName,
// Delegates channel bookkeeping to HyPerLayer::requireChannel, then — rank 0
// only — warns when a connection requests channel >= 2, since HyPerLCA only
// uses channels 0 and 1. The warning call and the return (original lines
// 109-118) are largely missing from this view; the last visible line below
// is fused with the start of communicateInitInfo (original line 119).
106 int HyPerLCALayer::requireChannel(
int channelNeeded,
int *numChannelsResult) {
107 int status = HyPerLayer::requireChannel(channelNeeded, numChannelsResult);
108 if (channelNeeded >= 2 && parent->columnId() == 0) {
"HyPerLCALayer \"%s\": connection on channel %d, but HyPerLCA only uses channels 0 and " 119 HyPerLCALayer::communicateInitInfo(std::shared_ptr<CommunicateInitInfoMessage const> message) {
// Body of communicateInitInfo (signature fused onto the previous line in
// this garbled view). If an adaptiveTimeScaleProbe name was configured,
// resolves it to mAdaptiveTimeScaleProbe (the lookup expression, original
// line 122, is missing). On failure, rank 0 distinguishes two errors by a
// BaseObject lookup on the same name: the object exists but is the wrong
// type, vs. the name is not found in the column at all. An MPI barrier then
// keeps ranks together (the subsequent exit, original lines 138-140, is
// missing — confirm). Finally delegates to ANNLayer::communicateInitInfo.
120 if (mAdaptiveTimeScaleProbeName) {
121 mAdaptiveTimeScaleProbe =
123 if (mAdaptiveTimeScaleProbe ==
nullptr) {
124 if (parent->getCommunicator()->commRank() == 0) {
// Name resolves to some object, but not an AdaptiveTimeScaleProbe:
125 auto isBadType = message->lookup<
BaseObject>(std::string(mAdaptiveTimeScaleProbeName));
126 if (isBadType !=
nullptr) {
127 ErrorLog() << description <<
": adaptiveTimeScaleProbe parameter \"" 128 << mAdaptiveTimeScaleProbeName
129 <<
"\" must be an AdaptiveTimeScaleProbe.\n";
// Name not present in the column at all:
132 ErrorLog() << description <<
": adaptiveTimeScaleProbe parameter \"" 133 << mAdaptiveTimeScaleProbeName
134 <<
"\" is not an AdaptiveTimeScaleProbe in the column.\n";
137 MPI_Barrier(parent->getCommunicator()->communicator());
141 auto status = ANNLayer::communicateInitInfo(message);
145 return Response::SUCCESS;
// Builds the GPU update kernel: snapshots layer geometry and LCA parameters,
// creates device buffers for the per-batch adaptive timesteps (one double
// per batch element) and for the transfer-function vertex/slope tables,
// copies the host tables to the device, binds everything as kernel arguments
// (the argument list, original lines 192-215, is missing from this view),
// and stores the kernel in krUpdate.
// NOTE(review): `loc` used below is declared on a missing line (original
// 151-154) — presumably getLayerLoc(); confirm.
// NOTE(review): tau is pre-divided by the column's deltaTime here, while the
// CPU path (updateState) divides by the dt passed at call time — confirm the
// two stay consistent if dt can vary.
149 int HyPerLCALayer::allocateUpdateKernel() {
150 PVCuda::CudaDevice *device = parent->getDevice();
155 const int nx = loc->nx;
156 const int ny = loc->ny;
157 const int nf = loc->nf;
158 const int num_neurons = nx * ny * nf;
159 const int nbatch = loc->nbatch;
// Halo (border) widths on each side:
160 const int lt = loc->halo.lt;
161 const int rt = loc->halo.rt;
162 const int dn = loc->halo.dn;
163 const int up = loc->halo.up;
164 const int numChannels = this->numChannels;
165 PVCuda::CudaBuffer *d_V = getDeviceV();
// Transfer-function (thresholding) parameters inherited from ANNLayer:
167 const float Vth = this->VThresh;
168 const float AMax = this->AMax;
169 const float AMin = this->AMin;
170 const float AShift = this->AShift;
171 const float VWidth = this->VWidth;
172 const bool selfInteract = this->selfInteract;
173 const float tau = timeConstantTau
174 / (float)parent->getDeltaTime();
175 PVCuda::CudaBuffer *d_GSyn = getDeviceGSyn();
176 PVCuda::CudaBuffer *d_activity = getDeviceActivity();
// One double per batch element for the adaptive timesteps:
178 size_t size = parent->getNBatch() *
sizeof(double);
179 d_dtAdapt = device->createBuffer(size, &description);
181 size = (size_t)numVertices *
sizeof(*verticesV);
182 d_verticesV = device->createBuffer(size, &description);
183 d_verticesA = device->createBuffer(size, &description);
// The slopes table holds one more entry than the vertex tables:
184 d_slopes = device->createBuffer(size +
sizeof(*slopes), &description);
186 d_verticesV->copyToDevice(verticesV);
187 d_verticesA->copyToDevice(verticesA);
188 d_slopes->copyToDevice(slopes);
191 updateKernel->setArgs(
216 krUpdate = updateKernel;
// GPU update path: refreshes the device-side per-batch timestep buffer from
// deltaTimes(), then runs the kernel prepared by allocateUpdateKernel.
// NOTE(review): the kernel launch itself (original line 229) and several
// intervening lines (223, 225-227) are missing from this view.
222 Response::Status HyPerLCALayer::updateStateGpu(
double time,
double dt) {
224 d_dtAdapt->copyToDevice(deltaTimes());
228 assert(updateKernel);
230 return Response::SUCCESS;
// CPU update path: gathers the activity buffer, the concatenated GSyn head
// pointer, and layer geometry, then calls the HyPerLCALayer_update_state
// wrapper. Most of the argument list (original lines 251-267, 269-272) is
// missing from this view. Note tau is passed as timeConstantTau / dt,
// mirroring the GPU setup's division by the column deltaTime.
// NOTE(review): nx/ny/nf/loc are declared on missing lines (original
// 242-245) — presumably from getLayerLoc(); confirm.
236 Response::Status HyPerLCALayer::updateState(
double time,
double dt) {
238 float *A = clayer->activity->data;
240 int num_channels = getNumChannels();
// GSyn[0] is the head of the contiguous per-channel GSyn storage:
241 float *gSynHead = GSyn == NULL ? NULL : GSyn[0];
246 int num_neurons = nx * ny * nf;
247 int nbatch = loc->nbatch;
250 HyPerLCALayer_update_state(
268 timeConstantTau / (
float)dt,
273 return Response::SUCCESS;
276 double *HyPerLCALayer::deltaTimes() {
277 if (mAdaptiveTimeScaleProbe) {
278 mAdaptiveTimeScaleProbe->
getValues(parent->simulationTime(), &mDeltaTimes);
281 mDeltaTimes.assign(getLayerLoc()->nbatch, parent->getDeltaTime());
283 return mDeltaTimes.data();
// Free-function CPU entry point: forwards its arguments to
// updateV_HyPerLCALayer (declared in CudaUpdateStateFunctions.hpp), which
// performs the LCA membrane-potential and activity update. Nearly all
// parameters and the forwarded argument list (original lines 289-330) are
// missing from this view.
288 void HyPerLCALayer_update_state(
290 const int numNeurons,
298 const int numChannels,
305 const bool selfInteract,
310 updateV_HyPerLCALayer(
virtual void ioParam_adaptiveTimeScaleProbe(enum ParamsIOFlag ioFlag)
adaptiveTimeScaleProbe: If using adaptive timesteps, the name of the AdaptiveTimeScaleProbe that will...
virtual double getDeltaUpdateTime() override
virtual int ioParamsFillGroup(enum ParamsIOFlag ioFlag) override
static bool completed(Status &a)
virtual void ioParam_selfInteract(enum ParamsIOFlag ioFlag)
selfInteract: the self-interaction coefficient s for the LCA dynamics, which models the equation d...
virtual void ioParam_timeConstantTau(enum ParamsIOFlag ioFlag)
timeConstantTau: the time constant tau for the LCA dynamics, which models the equation dV/dt = 1/tau*...
void getValues(double timevalue, double *valuesVector)
virtual int ioParamsFillGroup(enum ParamsIOFlag ioFlag) override