diff --git a/backend/agent.py b/backend/agent.py
index e18cf0b1fc644c2bbbb7b09f8e1bc85a446854f6..e75453c055229586c9597b39b5e9cc52d4327e28 100644
--- a/backend/agent.py
+++ b/backend/agent.py
@@ -52,7 +52,8 @@ def q_learning(space, activities):
     mean_reward = 0
     # Train the agent using Q-learning
-    num_episodes = 100
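+    # More episodes so the start-vs-end mean-reward comparison below has a stable tail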
+    num_episodes = 1000
     for episode in range(num_episodes):
         state, _ = env.reset()
         state = env.flatten_observation_to_int(state)
@@ -77,7 +78,11 @@ def q_learning(space, activities):
             old_state = state
             state = next_state
-            # comment
+            # Debug trace (disabled): inspect the learned Q-value for the taken action
+            # if old_state != state:
+            #     print(state)
+            #     print(action)
+            #     print(Q[state][action])
         time = env.process.env.now - start
@@ -85,10 +90,25 @@ def q_learning(space, activities):
         mean_reward += reward
-        """
         if (episode % 20 == 19):
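+            # Average the reward and time accumulated over the last 20 episodes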
+            mean_reward /= 20
             mean_time /= 20
-            print(f"Episode {episode-19} to episode {episode}: mean time = {mean_time}")
-        """
-
-        print(f"Episode {episode}: time = {time}")
\ No newline at end of file
+            print(f"Episode {episode-19} to episode {episode}: mean time = {mean_time}, mean reward = {mean_reward}")
+
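+            # First full window: record the baseline mean reward for the improvement report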
+            if episode == 19:
+                start_reward = mean_reward
+
+            # print(f"Episode {episode}: time = {time}, reward = {reward}")
+
+            if episode == num_episodes - 1:
+                end_reward = mean_reward
+                improvement = end_reward - start_reward
+                print(f"Mean reward improved by {improvement} over training")
+
+            # Reset the window sums so each 20-episode mean is independent
+            mean_reward = 0
+            mean_time = 0
+
+    return Q
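
For context rather than as part of the patch: now that q_learning returns the
learned Q-table, a caller could evaluate the greedy policy it induces. The
sketch below is a minimal illustration, not code from this repo: it assumes
the gymnasium-style reset()/step() API that the training loop above appears to
follow, reuses env.flatten_observation_to_int() from the diff, and
greedy_rollout is a hypothetical helper name.

def greedy_rollout(env, Q):
    # Play one episode, always taking the action with the highest learned Q-value.
    state, _ = env.reset()
    state = env.flatten_observation_to_int(state)
    total_reward = 0.0
    done = False
    while not done:
        # Q[state] is assumed to be indexable by action, matching Q[state][action] above.
        action = max(range(len(Q[state])), key=lambda a: Q[state][a])
        obs, reward, terminated, truncated, _ = env.step(action)
        state = env.flatten_observation_to_int(obs)
        total_reward += reward
        done = terminated or truncated
    return total_reward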