% Create the observation specification (6 observations)
numObservations = 6;
obsInfo = rlNumericSpec([numObservations 1]);
obsInfo.Name = 'observations';
obsInfo.Description = 'Information on reference voltage, measured capacitor voltage, and load current';
% Create the discrete action specification from Actions.mat
% (each row of the loaded 'actions' matrix is one action element)
load('Actions.mat','actions')
actInfo = rlFiniteSetSpec(num2cell(actions,2));
actInfo.Name = 'states';
% Create the Simulink environment interface (model name taken from the agent block path)
mdl = 'Reinforcement_learning_controller_discrete';
agentblk = [mdl '/RL_controller/RL Agent'];
env = rlSimulinkEnv(mdl,agentblk,obsInfo,actInfo);
% Fix the random seed for reproducibility
rng(0)
% Define the critic network: observations in, one Q-value per discrete action out
dnn = [
    featureInputLayer(numObservations,'Normalization','none','Name','state')
    fullyConnectedLayer(24,'Name','CriticStateFC1')
    reluLayer('Name','CriticRelu1')
    fullyConnectedLayer(24,'Name','CriticStateFC2')
    reluLayer('Name','CriticCommonRelu')
    fullyConnectedLayer(length(actInfo.Elements),'Name','output')];
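% Construct the critic (vector Q-value representation) from the network;
% the learn rate and gradient threshold below are assumed, typical values
criticOptions = rlRepresentationOptions('LearnRate',1e-3,'GradientThreshold',1);
critic = rlQValueRepresentation(dnn,obsInfo,actInfo,...
    'Observation',{'state'},criticOptions);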
% DQN agent options (sample time matches the 20 us controller step)
agentOptions = rlDQNAgentOptions(...
    'SampleTime',20e-6,...
    'TargetSmoothFactor',1e-3,...
    'ExperienceBufferLength',3000,...
    'UseDoubleDQN',false,...
    'DiscountFactor',0.9,...
    'MiniBatchSize',64);
% Create the DQN agent from the critic representation
agent = rlDQNAgent(critic,agentOptions);
% Training options: stop when the average reward over 5 episodes reaches 200
trainingOptions = rlTrainingOptions(...
    'MaxEpisodes',1000,...
    'MaxStepsPerEpisode',500,...
    'ScoreAveragingWindowLength',5,...
    'Verbose',false,...
    'Plots','training-progress',...
    'StopTrainingCriteria','AverageReward',...
    'StopTrainingValue',200,...
    'SaveAgentCriteria','EpisodeReward',...
    'SaveAgentValue',200);
% Train the agent, or load a pretrained agent from SimulinkVSCDQN.mat
doTraining = true;
if doTraining
    trainingStats = train(agent,env,trainingOptions);
else
    load('SimulinkVSCDQN.mat','agent');
end
% Simulate the trained agent against the environment
simOptions = rlSimulationOptions('MaxSteps',500);
experience = sim(env,agent,simOptions);
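% (Optional sketch) Inspect the simulation result; experience.Reward is a
% timeseries, so the cumulative reward of the run can be read out as:
totalReward = sum(experience.Reward.Data)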