sample_run.py
import os
import argparse

import torch

from scheduler import PGScheduler
from env.mrc_problem import MRCProblem
from env.hybrid_team import HybridTeam
from env.multi_round_scheduling_env import MultiRoundSchedulingEnv

if __name__ == '__main__':
"""
"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--cp', type=str, default="tmp/small_training_set/checkpoints_13_pg")
    parser.add_argument('--cp-version', type=int, default=3200)
    parser.add_argument('--problem-dir', type=str, default="tmp/small_test_set")
    parser.add_argument('--problem-num', type=int, default=1)
    parser.add_argument('--mode', type=str, default="argmax")
    # Number of independent environments rolled out in 'sample' mode
    parser.add_argument('--batch-size', type=int, default=8)
    # Number of scheduling rounds per environment
    parser.add_argument('--num-rounds', type=int, default=4)
    args = parser.parse_args()
    # Checkpoint path, e.g. <cp>/checkpoint_03200.tar
    cp_parent = args.cp
    checkpoint = args.cp_version
    checkpoint_folder = os.path.join(cp_parent, "checkpoint_%05d.tar" % checkpoint)
    # Problem instance file, e.g. <problem-dir>/problem_0001
    fname = os.path.join(args.problem_dir, 'problem_' + format(args.problem_num, '04'))
    # Action-selection mode: 'sample' (stochastic) or 'argmax' (greedy)
    mode = args.mode
    batch_size = args.batch_size
    num_rounds = args.num_rounds
    human_learning = True
    '''
    Load model
    '''
    # Fall back to CPU when CUDA is unavailable
    device = torch.device('cuda', 0) if torch.cuda.is_available() else torch.device('cpu')
    scheduler = PGScheduler(device=device, selection_mode=mode)
    scheduler.load_checkpoint(checkpoint_folder)
    print('Loaded: ' + checkpoint_folder)
    # Load the problem instance from the data folder
    problem = MRCProblem(fname=fname)
    print(problem.dur)
    # Create a hybrid human-robot team for the problem
    team = HybridTeam(problem)
    # The scheduler checkpoint was already loaded above
    if mode == 'sample':
        # Stochastic mode: roll out a batch of independent environments
        multi_round_envs = [MultiRoundSchedulingEnv(problem, team) for _ in range(batch_size)]
        for i_b in range(batch_size):
            print('Batch {}/{}.'.format(i_b + 1, batch_size))
            for step_count in range(num_rounds):
                schedule = scheduler.select_action(multi_round_envs[i_b].get_single_round())
                success, reward, done, makespan = multi_round_envs[i_b].step(schedule, human_learning=human_learning)
                print("Schedule:", schedule)
                print("Durations:", problem.dur)
                print("Wait Conditions:", problem.wait)
                print("Makespan:", makespan)
    elif mode == 'argmax':
        # Greedy mode: a single environment with deterministic selection
        multi_round_env = MultiRoundSchedulingEnv(problem, team)
        for step_count in range(num_rounds):
            schedule = scheduler.select_action(multi_round_env.get_single_round())
            success, reward, done, makespan = multi_round_env.step(schedule, human_learning=human_learning)
            print("Schedule:", schedule)
            print("Durations:", problem.dur)
            print("Wait Conditions:", problem.wait)
            print("Makespan:", makespan)