main.py
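"""Interactive text generation with a finetuned GPT-2 model.

Asks the user for a model directory, an episode size, and a text prompt,
then samples a continuation and saves it under OUTPUT_DIR.
"""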
from os import path, makedirs
import time

from transformers import GPT2LMHeadModel, GPT2Tokenizer

MODEL_DIR = './finetuned-gpt2'  # default directory of the finetuned model
OUTPUT_DIR = './data/out'       # directory where generated episodes are saved


def inputModelDir() -> str:
    """Ask for the model directory, falling back to the default."""
    dirpath = input(f'Model directory ({MODEL_DIR}): ').strip()
    return dirpath if dirpath else MODEL_DIR


def inputEpisodeSize() -> int:
    """Ask for the episode size (maximum generation length in tokens); default 512."""
    while True:
        size = input('Episode size (512): ').strip()
        if not size:
            return 512
        try:
            value = int(size)
            if value > 0:
                return value
        except ValueError:
            pass
        print('Size must be a positive number.')


def inputPrompt() -> str:
    """Ask for a non-empty generation prompt."""
    while True:
        prompt = input('Prompt: ').strip()
        if prompt:
            return prompt
        print('Prompt must be non-empty.')


def save_episode(model_name, prompt, generated_text) -> None:
    """Write the prompt and generated text to a timestamped file in OUTPUT_DIR."""
    makedirs(OUTPUT_DIR, exist_ok=True)  # ensure the output directory exists
    timestamp = round(time.time())
    filepath = f'{OUTPUT_DIR}/{model_name}_{timestamp}.txt'
    with open(filepath, 'w') as f:
        f.write(f'Prompt: {prompt}\n\n' + generated_text)


def main():
    model_dir = inputModelDir()
    ep_size = inputEpisodeSize()
    prompt = inputPrompt()

    print('Loading model and tokenizer...')
    model = GPT2LMHeadModel.from_pretrained(model_dir)
    # The base GPT-2 tokenizer is used regardless of the model directory.
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    # GPT-2 has no dedicated padding token, so reuse the end-of-text token.
    tokenizer.pad_token = tokenizer.eos_token

    # Encode the prompt and sample a single continuation of up to ep_size tokens.
    input_ids = tokenizer.encode(prompt, return_tensors='pt')
    output = model.generate(
        input_ids,
        num_return_sequences=1,
        max_length=ep_size,
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True
    )[0]
    generated_text = tokenizer.decode(output, skip_special_tokens=True)

    print('\n\n\n')
    print(generated_text)
    # Derive the model's name from its directory path for the output filename;
    # normpath strips any trailing separator first.
    save_episode(path.basename(path.normpath(model_dir)), prompt, generated_text)


if __name__ == '__main__':
    main()
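
# Example session (assumes transformers is installed and a finetuned model
# exists at ./finetuned-gpt2; pressing Enter accepts each default, and the
# prompt text here is only a hypothetical illustration):
#   $ python main.py
#   Model directory (./finetuned-gpt2):
#   Episode size (512):
#   Prompt: Once upon a time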