-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathTrainingTester.py
More file actions
38 lines (29 loc) · 1.07 KB
/
TrainingTester.py
File metadata and controls
38 lines (29 loc) · 1.07 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import os
import gym
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.evaluation import evaluate_policy
# Evaluate a previously-trained PPO agent on the HumanoidStandup-v2 task and
# report the mean episodic reward over a fixed number of evaluation episodes.
env = gym.make('HumanoidStandup-v2')
log_path = os.path.join('Training','Logs')
# SB3 models expect a vectorized environment when loading/attaching an env.
# (DummyVecEnv calls the lambda immediately, so `env` is captured here, before
# it is rebound below.)
env = DummyVecEnv([lambda:env])
PPO_Path = os.path.join('Training','Saved Models','PPOHumanStand10M5')
#Load the model:
model = PPO.load(PPO_Path,env=env)
#Run the model in the environment and get rewards
env = gym.make('HumanoidStandup-v2')
episodes = 10
# BUG FIX: the accumulator must be created ONCE, outside the episode loop.
# Previously it was re-initialized every episode, so only the final episode's
# score survived and "meanReward" was not a mean at all.
rewardArray = []
for episode in range(1,episodes+1):
    obs = env.reset()
    done = False
    score = 0  # cumulative reward for this episode
    while not done:
        #env.render()
        # Use the trained policy to choose actions (instead of random actions).
        action, _ = model.predict(obs)
        obs, reward, done, info = env.step(action)
        score += reward
    #print('Episode:{} Score:{}'.format(episode, score))
    rewardArray.append(score)
env.close()
# Mean episodic reward across all evaluated episodes.
meanReward = sum(rewardArray)/len(rewardArray)
# Message now reflects the actual episode count rather than a hard-coded "10".
print('Mean Reward for {} Episodes: {}'.format(episodes, meanReward))