Update generate.py
generate.py  CHANGED  +7 -2
@@ -1,17 +1,22 @@
+# generate.py
+
 import torch
 import torch.nn.functional as F
-from evo_model import EvoDecoder
 from transformers import GPT2Tokenizer
+from evo_decoder import EvoDecoder
 
+# Load tokenizer
 tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+tokenizer.pad_token = tokenizer.eos_token  # Safe default
 
+# Load model
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model = EvoDecoder(
     vocab_size=tokenizer.vocab_size,
     d_model=256,
     nhead=4,
     num_layers=3,
-    dim_feedforward=512
+    dim_feedforward=512
 ).to(device)
 
 model.load_state_dict(torch.load("evo_decoder.pt", map_location=device))
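The hunk ends right after the checkpoint load, so for context here is a minimal sketch of how the rest of generate.py might sample from the loaded model. It assumes EvoDecoder's forward pass takes a (batch, seq_len) tensor of token IDs and returns (batch, seq_len, vocab_size) logits with causal masking handled internally; the sample_text helper, its defaults, and the example prompt are illustrative and not part of this commit.

# Sketch only: assumes model(input_ids) -> logits of shape (batch, seq_len, vocab_size).
@torch.no_grad()
def sample_text(prompt, max_new_tokens=50, temperature=1.0):
    model.eval()
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    for _ in range(max_new_tokens):
        logits = model(input_ids)                     # assumed forward signature
        next_logits = logits[:, -1, :] / temperature  # logits for the last position
        probs = F.softmax(next_logits, dim=-1)
        next_id = torch.multinomial(probs, num_samples=1)
        input_ids = torch.cat([input_ids, next_id], dim=-1)
        if next_id.item() == tokenizer.eos_token_id:  # stop at end-of-sequence
            break
    return tokenizer.decode(input_ids[0], skip_special_tokens=True)

print(sample_text("The evolution of language models"))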