% Network sizing: input width from the observation spec, one Q-value output per discrete action
nI = observationInfo.Dimension(1);    % number of observations
nL1 = 12;                             % hidden layer sizes
nL2 = 24;
nL3 = 24;
nO = numel(actionInfo.Elements);      % number of discrete actions
% Multi-output DQN critic network: state in, one Q-value per action out
dnn = [
    featureInputLayer(nI,'Normalization','none','Name','state')
    fullyConnectedLayer(nL1,'Name','fc1')
    reluLayer('Name','relu1')
    fullyConnectedLayer(nL2,'Name','fc2')
    reluLayer('Name','relu2')
    fullyConnectedLayer(nL3,'Name','fc3')
    reluLayer('Name','relu3')
    fullyConnectedLayer(nO,'Name','out')];
% Critic options; set 'UseDevice' to "cpu" if no supported CUDA GPU is available
criticOptions = rlRepresentationOptions('LearnRate',0.0001,'GradientThreshold',1,'UseDevice',"gpu");
critic = rlQValueRepresentation(dnn,observationInfo,actionInfo,'Observation',{'state'},criticOptions);
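% Optional spot check (hypothetical, assumes nI and nO from above): for a
% random observation, getValue should return an nO-by-1 vector of Q-values.
% q = getValue(critic,{rand(nI,1)});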
agentOptions = rlDQNAgentOptions(...
    'SampleTime',Ts,...
    'UseDoubleDQN',true,...
    'TargetSmoothFactor',1e-2,...        % soft target update with factor 1e-2...
    'TargetUpdateFrequency',20,...       % ...applied every 20 steps
    'DiscountFactor',0.99,...
    'ExperienceBufferLength',1e8);       % note: 1e8 transitions can exhaust memory; consider something like 1e6
% Epsilon-greedy exploration: start fully random (Epsilon = 1) and decay toward 1%
agentOptions.EpsilonGreedyExploration.Epsilon = 1;
agentOptions.EpsilonGreedyExploration.EpsilonMin = 0.01;
agentOptions.EpsilonGreedyExploration.EpsilonDecay = 0.0001;
agent = rlDQNAgent(critic,agentOptions);
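% Optional spot check (hypothetical): even untrained, the agent should return
% a valid action from actionInfo.Elements for a random observation.
% act = getAction(agent,{rand(nI,1)});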
maxsteps = ceil(trun/Ts);   % steps per episode; Ts (sample time) and trun (run time) must be defined beforehand
trainingOpts = rlTrainingOptions(...
    'MaxEpisodes',maxepisodes,...
    'MaxStepsPerEpisode',maxsteps,...
    'ScoreAveragingWindowLength',5,...
    'Verbose',true,...
    'UseParallel',false,...
    'StopTrainingCriteria','EpisodeCount',...
    'StopTrainingValue',300,...
    'SaveAgentCriteria','EpisodeCount',...
    'SaveAgentValue',300,...
    'SaveAgentDirectory',"Agent_5ac2ob_");   % must be a folder, not a .mat file; saved agents are written inside it
trainingStats = train(agent,env,trainingOpts);
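After training, a quick sanity check is to run the greedy policy (exploration is off during simulation) through the environment and look at the cumulative reward. A minimal sketch, assuming env and maxsteps are still in scope:

% Simulate the trained agent and total up the reward
simOpts = rlSimulationOptions('MaxSteps',maxsteps);
experience = sim(env,agent,simOpts);
totalReward = sum(experience.Reward.Data)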