MINISTRY OF EDUCATION AND SCIENCE OF UKRAINE
LVIV POLYTECHNIC NATIONAL UNIVERSITY
Department of Computer Engineering (EOM)
REPORT
on laboratory work No. 6
on the topic:
"Investigation of methods and algorithms of reinforcement learning (Reinforcement Learning) in an MDP environment"
in the course
"Theory of Intelligent Systems"
Lviv – 2015
Objective:
To study the operation of reinforcement learning methods and algorithms in an MDP environment and to acquire practical skills in implementing them.
Procedure:
1. Implement a model of agent-environment interaction in the form of a Markov decision process (choose the number of environment states and the number of actions available to the agent according to the variant). Model of optimal behaviour (objective function): cumulative reward with a receding horizon (receding-horizon model), sketched below.
Variant number (N): 5
Reinforcement learning method: TD(0) => learning based on successive re-estimation (temporal difference)
Number of MDP states: 3
Number of actions available to the agent: 3
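The TD(0) method assigned by the variant re-estimates the value of the visited state after every interaction. A sketch of the standard update rule, which corresponds to the line with alfaTD and gammaTD inside the td0() function in the listing below (alpha is the learning rate TDALFA, gamma the discount factor TDGAMMA):

    V(s_t) \leftarrow V(s_t) + \alpha\left[r_{t+1} + \gamma V(s_{t+1}) - V(s_t)\right]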
2. Source code of the computational experiment
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <tchar.h>
#include <math.h>
#define ENVTYPE 1
#define NACTIONS 3
#define NSTATES 3
#define NSTEPS 500
#define NREPLICAS 1
#define REWARD 10 // +1
#define PENALTY 0 // -1
#define RLTYPE 5 // 5 = TD(0), 6 = Q-learning (3, 4 = legacy epsilon-greedy / softmax, currently disabled)
#define RLEPSILON 0.1f
#define RLTAU 0.12f
#define TDALFA 0.3f
#define TDGAMMA 0.1f
#define QLALFA 0.5f
#define QLGAMMA 0.1f
// ----------------------------------------------------------------------------
// global parameters and values
int t; // current time step
int T = NSTEPS; // number of time steps = number of interactions between agent and environment
int n = NREPLICAS; // number of replicas
int nA = NACTIONS; // number of actions
int nS = NSTATES; // number of states
// ----------------------------------------------------------------------------
// environment
int env = ENVTYPE; // type of environment:
// env = 0 -> se (stationary environment)
// env = 1 -> mdp
float sePa[NACTIONS]; // se: probabilities of rewards for each action
int ceState; // ce: current state of commutative environment
float cePa[NSTATES][NACTIONS]; // ce: probabilities of reward for each action for each state of environment
float cePs[NSTATES][NSTATES]; // ce: probabilities of transition from one state to another
int mdpState;
int mdpR[NSTATES][NACTIONS];
float mdpT[NSTATES][NACTIONS][NSTATES];
// ----------------------------------------------------------------------------
// agent
//int agt; // type of agent:
// agt = 0 -> random agent
// agt = 1 -> perfect agent
// agt = 2 -> greedy RL
// agt = 3 -> epsilon-greedy RL
// agt = 4 -> softmax action selection
// agt = 5 -> TD(0)
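// agt = 6 -> Q-learning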
int agt = 6;
int action = 0; // current action = {0, ... ,(nA-1)}
int response; // current response of environment = {0;1}/{-1;+1}
int paction[NSTATES]; // actions of perfect agent (per state) (MDP)
float gammaVI = 0.1f; // discount factor for value iteration (MDP)
float e = RLEPSILON; // epsilon value (epsilon-greedy RL)
float tau = RLTAU; // tau value (softmax action selection)
float k[NSTATES][NACTIONS]; // number of realizations for each action
float r[NSTATES][NACTIONS]; // total reward for each action
float Q[NSTATES][NACTIONS]; // estimated action value Q[i]=r[i]/k[i] for each action;
float p[NACTIONS]; // selection probability for each action (softmax);
int mdpPrevState; // previous state of MDP
float AHCresponse; // current response of Adaptive Heuristic Critic (for TD(0))
float alfaTD = TDALFA; // learning rate (TD(0))
float gammaTD = TDGAMMA; // discount factor (TD(0))
float alfaQL = QLALFA; // learning rate (Q-learning)
float gammaQL = QLGAMMA; // discount factor (Q-learning)
float V[NSTATES]; // estimated value of state (TD(0))
float Qsa[NSTATES][NACTIONS]; // expected discounted reinforcement of taking action a in state s (Q-learning)
// ----------------------------------------------------------------------------
// results for current replica
float sumR; // total reward over time sumR(t)
float avrR; // average reward over time avrR(t) = sumR(t)/t
// ----------------------------------------------------------------------------
// tabulated results
float _sumR[NSTEPS][NREPLICAS];
float _avrR[NSTEPS][NREPLICAS];
// ----------------------------------------------------------------------------
// final simulation results
float sumRm[NSTEPS]; // mean values of sumR(t)
float sumRv[NSTEPS]; // corresponding variances
float avrRm[NSTEPS]; // mean values of avrR(t)
float avrRv[NSTEPS]; // corresponding variances
// ----------------------------------------------------------------------------
// files for parameters and results
char * par_file_name = "d:/temp2/lab6.parameters.dat";
FILE * par_file;
char * RA_res_file_name = "d:/temp2/lab6.RA.results.dat";
FILE * RA_res_file;
char * PA_res_file_name = "d:/temp2/lab6.PA.results.dat";
FILE * PA_res_file;
char * RL_res_file_name = "d:/temp2/lab6.RL.results.dat";
FILE * RL_res_file;
// ----------------------------------------------------------------------------
// uniform discrete probability distribution
int uRand(int x)
{
int _rnum = (int)((float)x * (float)rand() / ((float)RAND_MAX + 1.0f));
if (_rnum >= x) _rnum = x - 1; // guard against returning x due to rounding at the upper edge
return _rnum;
}
// ----------------------------------------------------------------------------
// discrete probability distribution specified by probabilities from <_array>
int dRand(float* _array, int size)
{
int _rnum = size - 1;
float _left = 0;
float _right = _array[0];
float ftmp = (float)rand() / (float)RAND_MAX;
for (int i = 0; i < size - 1; i++)
{
if ((ftmp >= _left) && (ftmp < _right)) { _rnum = i; break; }
_left = _right;
_right += _array[i + 1];
}
return _rnum;
}
// ----------------------------------------------------------------------------
// initialization of stationary environment
void seInit(void)
{
for (int i = 0; i < nA; i++)
sePa[i] = (float)rand() / (float)RAND_MAX;
//sePa[0] = 0.2f;
//sePa[1] = 0.8f;
}
// ----------------------------------------------------------------------------
// response of stationary environment
int seResponse(void)
{
int _r;
float rnum = (float)rand() / (float)RAND_MAX;
if (rnum < sePa[action]) _r = REWARD;
else _r = PENALTY;
return _r;
}
// ----------------------------------------------------------------------------
// initialization of mdp
void mdpInit(void)
{
int i, j, v;
int maxReward = REWARD;
float _sum1, _sum2;
// reward values R(s,a)
for (i = 0; i < nS; i++)
for (j = 0; j < nA; j++)
mdpR[i][j] = uRand(maxReward);
// probabilities of state transition
for (i = 0; i < nS; i++)
for (j = 0; j < nA; j++)
{
_sum1 = 0;
_sum2 = 0;
for (v = 0; v < nS; v++)
{
mdpT[i][j][v] = (float)rand() / (float)RAND_MAX;
_sum1 += mdpT[i][j][v];
}
for (v = 0; v < nS - 1; v++)
{
mdpT[i][j][v] = mdpT[i][j][v] / _sum1;
_sum2 += mdpT[i][j][v];
}
mdpT[i][j][nS - 1] = 1.0f - _sum2;
}
// initial state
mdpState = uRand(nS);
}
// ----------------------------------------------------------------------------
// response of mdp & state transition
int mdpResponse(void)
{
int _r;
// get response in current state
_r = mdpR[mdpState][action];
// perform MDP state transition (from the current MDP state, not ceState)
mdpState = dRand(mdpT[mdpState][action], nS);
return _r;
}
// ----------------------------------------------------------------------------
// environment
int environment(int _en)
{
int _r = 0;
switch (_en)
{
case 0: _r = seResponse(); break;
case 1: _r = mdpResponse(); break;
default: printf("lab3 error: wrong env code specified\n");
}
return _r;
}
// ----------------------------------------------------------------------------
// save parameters in file
void saveParameters(void)
{
int i, j, v;
if ((par_file = fopen(par_file_name, "w")) == NULL) {
fprintf(stderr, "Cannot open file <%s> for parameters of experiment.\n", par_file_name);
return; // writing to a NULL stream is undefined behaviour
}
fprintf(par_file, "T = %d\n", T);
fprintf(par_file, "n = %d\n", n);
fprintf(par_file, "env = %d\n", env);
fprintf(par_file, "nA = %d\n", nA);
if (env) fprintf(par_file, "nS = %d\n", nS);
fprintf(par_file, "RL-agent type = %d\n", RLTYPE);
if (agt == 3) fprintf(par_file, "epsilon = %f\n", e);
if (agt == 4) fprintf(par_file, "tau = %f\n", tau);
fprintf(par_file, "====================\n");
switch (env)
{
case 0: // se (stationary environment)
for (i = 0; i < nA; i++)
fprintf(par_file, "p(a%d) = %f\n", i, sePa[i]);
break;
case 1: // mdp
// values of reward function
for (i = 0; i < nS; i++)
{
for (j = 0; j < nA; j++)
fprintf(par_file, "R(s%d,a%d) = %d\n", i, j, mdpR[i][j]);
if (i < nS - 1) fprintf(par_file, "--------------------\n");
}
fprintf(par_file, "\n====================\n");
// probabilities of state transition (values of the state transition function)
for (i = 0; i < nS; i++)
{
for (j = 0; j < nA; j++)
{
for (v = 0; v < nS; v++)
fprintf(par_file, "T(s%d,a%d,s%d) = %f\n", i, j, v, mdpT[i][j][v]);
fprintf(par_file, "--------------------\n");
}
}
break;
default: printf("lab3 error: wrong env model code specified\n");
}
fclose(par_file);
}
// ----------------------------------------------------------------------------
// save results of random agent
void saveResultsRA(void)
{
int i;
if ((RA_res_file = fopen(RA_res_file_name, "w")) == NULL) {
fprintf(stderr, "Cannot open file <%s> for experimental results.\n", RA_res_file_name);
return;
}
for (i = 0; i < T; i++)
//fprintf(RA_res_file, "%f\n", avrRm[i]);//, avrRv[i] sumRm[i], sumRv[i], ,%f,%f,%f
fprintf(RA_res_file, "%f\n", sumRm[i]);
fclose(RA_res_file);
}
// ----------------------------------------------------------------------------
// save results of perfect agent
void saveResultsPA(void)
{
int i;
if ((PA_res_file = fopen(PA_res_file_name, "w")) == NULL) {
fprintf(stderr, "Cannot open file <%s> for experimental results.\n", PA_res_file_name);
return;
}
for (i = 0; i < T; i++)
// fprintf(PA_res_file, "%f,%f,%f,%f\n", sumRm[i], sumRv[i], avrRm[i], avrRv[i]);
fprintf(PA_res_file, "%f\n", sumRm[i]);
fclose(PA_res_file);
}
// ----------------------------------------------------------------------------
// save results of RL-agent
void saveResultsRL(void)
{
int i;
if ((RL_res_file = fopen(RL_res_file_name, "w")) == NULL) {
fprintf(stderr, "Cannot open file <%s> for experimental results.\n", RL_res_file_name);
return;
}
for (i = 0; i < T; i++)
//fprintf(RL_res_file, "%f,%f,%f,%f\n", sumRm[i], sumRv[i], avrRm[i], avrRv[i]);
fprintf(RL_res_file, "%f\n", sumRm[i]);
fclose(RL_res_file);
}
// ----------------------------------------------------------------------------
// return maximum value from <_array> of <size> elements
float max(float* _array, int size)
{
int _arg = uRand(size);
float _max = _array[_arg];
for (int i = 0; i < size; i++)
if (_array[i] > _max) _max = _array[i];
return _max;
}
// ----------------------------------------------------------------------------
// return index of maximum value in <_array> of <size> elements
int argmax(float* _array, int size)
{
int _arg = uRand(size);
float _max = _array[_arg];
for (int i = 0; i < size; i++)
if (_array[i] > _max) { _max = _array[i]; _arg = i; }
return _arg;
}
// ----------------------------------------------------------------------------
// init perfect agent (for MDP)
void initPerfectAgent(void)
{
int i, j, z;
float sum = 0.0f;
// perform value iteration --> optimal value function V*(s)
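// Bellman backup per sweep: Q(s,a) = R(s,a) + gammaVI * sum_s' T(s,a,s') * V(s'); V(s) = max_a Q(s,a)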
for (z = 0; z < nS; z++) V[z] = 1.0f;
for (t = 0; t < T * 30; t++)
{
for (i = 0; i < nS; i++)
{
for (j = 0; j < nA; j++)
{
sum = 0.0f;
for (z = 0; z < nS; z++) sum = sum + mdpT[i][j][z] * V[z];
Qsa[i][j] = mdpR[i][j] + gammaVI * sum;
}
V[i] = max(Qsa[i], nA);
}
}
// determine the optimal policy given the optimal value function
for (i = 0; i < nS; i++)
{
for (j = 0; j < nA; j++)
{
sum = 0.0f;
for (z = 0; z < nS; z++) sum = sum + mdpT[i][j][z] * V[z];
Qsa[i][j] = mdpR[i][j] + gammaVI * sum;
}
paction[i] = argmax(Qsa[i], nA);
}
}
// ----------------------------------------------------------------------------
// init RL agent
void initRLAgent(void)
{
int i, j, z;
float sum = 0.0f;
// set initial values of the estimated state values (TD(0))
for (z = 0; z < nS; z++) V[z] = 5.0f;
// init parameters of RL sub-agent for TD(0)
for (i = 0; i < nS; i++)
for (j = 0; j < nA; j++)
{
k[i][j] = 0.0f; r[i][j] = 0.0f; Q[i][j] = 5.0f; p[j] = 0.0f; // p is indexed by action
}
// set initial values of expected discounted reinforcements (Q-learning)
for (i = 0; i < nS; i++)
for (j = 0; j < nA; j++)
Qsa[i][j] = 5.0f;
}
// ----------------------------------------------------------------------------
// init agent
void initAgent(int _ag)
{
// int i;
switch (_ag)
{
case 0: break;
case 1: initPerfectAgent(); break;
// case 2: for(i=0;i<nA;i++){k[i]=0; r[i]=0; Q[i]=1.0f;}; action = uRand(nA); break;
// case 3: for(i=0;i<nA;i++){k[i]=0; r[i]=0; Q[i]=1.0f;}; action = uRand(nA); break;
// case 4: for(i=0;i<nA;i++){k[i]=0; r[i]=0; Q[i]=0.0f; p[i]=0.0f;}; action = uRand(nA); break;
case 5: initRLAgent(); break;
case 6: initRLAgent(); break;
default: printf("lab3 error: wrong agent code specified\n");
}
}
// ----------------------------------------------------------------------------
// random agent
int randomAgent(void)
{
return uRand(nA);
}
// ----------------------------------------------------------------------------
// perfect agent (for MDP)
int perfectAgent(void)
{
return paction[mdpState];
}
// ----------------------------------------------------------------------------
// greedy RL
int greedy(void)
{
int ps = mdpPrevState;
int _action = action;
// modify estimated action value for previous state
r[ps][action] += AHCresponse;
k[ps][action]++;
Q[ps][action] = r[ps][action] / k[ps][action];
// select next action in current state
_action = argmax(Q[mdpState], nA);
return _action;
}
// ----------------------------------------------------------------------------
// epsilon greedy RL
int epsilonGreedy(void)
{
int ps = mdpPrevState;
int _action = action;
// modify estimated action value for previous state
r[ps][action] += AHCresponse;
k[ps][action]++;
Q[ps][action] = r[ps][action] / k[ps][action];
// select next action in current state
float rnum = (float)rand() / (float)RAND_MAX;
if (rnum < e) _action = uRand(nA);
else _action = argmax(Q[mdpState], nA);
return _action;
}
// ----------------------------------------------------------------------------
// softmax action selection
int softmax(void)
{
int i;
int ps = mdpPrevState;
int _action = action;
float tmp[NACTIONS], pSum = 0.0f;
// modify estimated action value for previous state
r[ps][action] += AHCresponse;
k[ps][action]++;
Q[ps][action] = r[ps][action] / k[ps][action];
// modify values of actions' selection probabilities
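// Boltzmann (Gibbs) distribution: p(a) = exp(Q(s,a)/tau) / sum_b exp(Q(s,b)/tau)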
for (i = 0; i < nA; i++) { tmp[i] = expf(Q[mdpState][i] / tau); pSum += tmp[i]; }
for (i = 0; i < nA; i++) { p[i] = tmp[i] / pSum; }
// select next action in current state
_action = dRand(p, nA);
return _action;
}
// ----------------------------------------------------------------------------
// temporal difference method -> TD(0)
int td0(void)
{
int ps, cs;
int _action;
// select first action randomly
if (t == 0) { mdpPrevState = mdpState; return uRand(nA); }
// adaptive heuristic critic
ps = mdpPrevState; // previous state of MDP
cs = mdpState; // current state of MDP
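// TD(0) update: V(s) <- V(s) + alfaTD * (r + gammaTD * V(s') - V(s));
// the refreshed V(s) is then handed to the RL sub-agent as the critic's reward signal (AHCresponse)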
V[ps] = V[ps] + alfaTD * (((float)response + (gammaTD * V[cs])) - V[ps]);
AHCresponse = V[ps];
// call RL sub-agent
_action = greedy();
//_action = epsilonGreedy();
//_action = softmax();
mdpPrevState = mdpState;
return _action;
}
// ----------------------------------------------------------------------------
// Q-learning
int qLearning(void)
{
int ps, cs;
int a = action;
int _action;
float maxQ;
// select first action randomly
if (t == 0) { mdpPrevState = mdpState; return uRand(nA); }
// modify estimated action value Q(s,a) (= expected discounted reinforcement)
ps = mdpPrevState; // previous state of MDP
cs = mdpState; // current state of MDP
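// Q-learning update: Q(s,a) <- Q(s,a) + alfaQL * (r + gammaQL * max_a' Q(s',a') - Q(s,a))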
maxQ = max(Qsa[cs], nA);
Qsa[ps][a] = Qsa[ps][a] + alfaQL * (((float)response + (gammaQL * maxQ)) - Qsa[ps][a]);
// select next action
_action = argmax(Qsa[cs], nA);
mdpPrevState = mdpState;
return _action;
}
// ----------------------------------------------------------------------------
// agent
int agent(int _ag)
{
int _a = 0;
switch (_ag)
{
case 0: _a = randomAgent(); break;
case 1: _a = perfectAgent(); break;
// case 2: _a = greedy(); break;
// case 3: _a = epsilonGreedy(); break;
// case 4: _a = softmax(); break;
case 5: _a = td0(); break;
case 6: _a = qLearning(); break;
default: printf("lab3 error: wrong agent code specified\n");
}
return _a;
}
// ----------------------------------------------------------------------------
// simulation
void simulation(int _i)
{
initAgent(agt);
sumR = 0.0f;
avrR = 0.0f;
for (t = 0; t < T; t++) {
// get action of agent
action = agent(agt);
// get response of environment
response = environment(env);
// calculate cumulative results
sumR = sumR + (float)response;
avrR = sumR / ((float)t + 1);
// save results
_sumR[t][_i] = sumR;
_avrR[t][_i] = avrR;
}
}
// ----------------------------------------------------------------------------
// get mean values of simulation results
void getMeanValues(void)
{
for (t = 0; t < T; t++)
{
float tmps1 = 0.0f;
float tmps2 = 0.0f;
for (int i = 0; i < n; i++)
{
tmps1 += _sumR[t][i];
tmps2 += _avrR[t][i];
}
sumRm[t] = (float)tmps1 / (float)n;
avrRm[t] = (float)tmps2 / (float)n;
}
}
// ----------------------------------------------------------------------------
// get variances of simulation results
void getVarianceValues(void)
{
for (t = 0; t < T; t++)
{
float tmps1 = 0.0f;
float tmps2 = 0.0f;
for (int i = 0; i < n; i++)
{
tmps1 += (sumRm[t] - _sumR[t][i]) * (sumRm[t] - _sumR[t][i]);
tmps2 += (avrRm[t] - _avrR[t][i]) * (avrRm[t] - _avrR[t][i]);
}
if (n > 1) {
sumRv[t] = tmps1 / (float)(n - 1);
avrRv[t] = tmps2 / (float)(n - 1);
} else { // a single replica gives no variance estimate
sumRv[t] = 0.0f;
avrRv[t] = 0.0f;
}
}
}
// ----------------------------------------------------------------------------
// main
int main(int argc, char* argv[])
{
int i;
// init random-number generator
srand((unsigned)time(NULL));
// init environment
mdpInit();
// save parameters of experiment
saveParameters();
// run experiment for random agent
agt = 0;
for (i = 0; i < n; i++) simulation(i);
getMeanValues();
getVarianceValues();
saveResultsRA();
// run experiment for perfect agent
agt = 1;
for (i = 0; i < n; i++) simulation(i);
getMeanValues();
getVarianceValues();
saveResultsPA();
// run experiment for RL-agent
agt = RLTYPE;
for (i = 0; i < n; i++) simulation(i);
getMeanValues();
getVarianceValues();
saveResultsRL();
return 0;
}
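A note on building and running (an assumption, not stated in the original report): the <tchar.h> include suggests the program was compiled with Microsoft Visual C++; with gcc/MinGW that include can simply be removed and the file compiled as, for example, gcc lab6.c -o lab6 -lm (the -lm flag links the math library needed for expf). The output directory d:/temp2/ must exist beforehand, otherwise the fopen calls fail and the parameter and result files are not produced.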
3. Program execution results:
Experiment parameters:
T = 500
n = 1
env = 1
nA = 3
nS = 3
RL-agent type = 5
====================
R(s0,a0) = 7
R(s0,a1) = 5
R(s0,a2) = 1
--------------------
R(s1,a0) = 1
R(s1,a1) = 8
R(s1,a2) = 0
--------------------
R(s2,a0) = 4
R(s2,a1) = 3
R(s2,a2) = 0
====================
T(s0,a0,s0) = 0.312214
T(s0,a0,s1) = 0.216065
T(s0,a0,s2) = 0.471720
--------------------
T(s0,a1,s0) = 0.162951
T(s0,a1,s1) = 0.450980
T(s0,a1,s2) = 0.386069
--------------------
T(s0,a2,s0) = 0.415337
T(s0,a2,s1) = 0.521689
T(s0,a2,s2) = 0.062974
--------------------
T(s1,a0,s0) = 0.245622
T(s1,a0,s1) = 0.072671
T(s1,a0,s2) = 0.681707
--------------------
T(s1,a1,s0) = 0.668826
T(s1,a1,s1) = 0.313419
T(s1,a1,s2) = 0.017754
--------------------
T(s1,a2,s0) = 0.498938
T(s1,a2,s1) = 0.231973
T(s1,a2,s2) = 0.269089
--------------------