model_inputs.py
from torch.utils.data import DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from transformers import DataCollatorForSeq2Seq

class MODEL_INPUTS:
    """Builds tokenized model inputs and dataloaders for instruction tuning."""

    def __init__(self,
                 prompter,
                 tokenizer,
                 max_length: int):
        self.prompter = prompter
        self.tokenizer = tokenizer
        self.max_length = max_length

    def tokenize(self, prompt, add_eos_token=True):
        # Tokenize without padding; the collator pads dynamically per batch.
        result = self.tokenizer(prompt,
                                truncation=True,
                                max_length=self.max_length,
                                padding=False,
                                return_tensors=None)
        # Append the EOS token when the sequence was not truncated and does
        # not already end with it, so the model learns where outputs stop.
        if (
            result["input_ids"][-1] != self.tokenizer.eos_token_id
            and len(result["input_ids"]) < self.max_length
            and add_eos_token
        ):
            result["input_ids"].append(self.tokenizer.eos_token_id)
            result["attention_mask"].append(1)
        # Causal-LM objective: labels are a copy of the input ids.
        result["labels"] = result["input_ids"].copy()
        return result

    def generate_and_tokenize_prompt(self, example):
        # Called once per dataset row (e.g. via datasets.Dataset.map);
        # renders the instruction/input/output fields into a full prompt.
        full_prompt = self.prompter.generate_prompt(example["instruction"],
                                                    example["input"],
                                                    example["output"])
        return self.tokenize(full_prompt)

    def prepare_dataloader(self,
                           train_data,
                           valid_data,
                           batch_size: int):
        # DistributedSampler shards the training set across DDP ranks; it
        # requires an initialized process group (e.g. launched via torchrun).
        train_dataloader = DataLoader(
            dataset=train_data,
            batch_size=batch_size,
            sampler=DistributedSampler(train_data),
            collate_fn=DataCollatorForSeq2Seq(tokenizer=self.tokenizer,
                                              padding=True,
                                              return_tensors="pt"))
        # Validation iterates in order on every rank; DataCollatorForSeq2Seq
        # pads input_ids/attention_mask to the batch maximum and labels with -100.
        valid_dataloader = DataLoader(
            dataset=valid_data,
            batch_size=batch_size,
            sampler=SequentialSampler(valid_data),
            collate_fn=DataCollatorForSeq2Seq(tokenizer=self.tokenizer,
                                              padding=True,
                                              return_tensors="pt"))
        return train_dataloader, valid_dataloader
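

# A minimal usage sketch, assuming an Alpaca-style prompter object exposing
# generate_prompt(instruction, input, output) and a Hugging Face dataset with
# "instruction"/"input"/"output" columns; the model name, dataset name, and
# hyperparameters below are illustrative assumptions, not part of this file.
#
#     from datasets import load_dataset
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
#     model_inputs = MODEL_INPUTS(prompter=prompter,
#                                 tokenizer=tokenizer,
#                                 max_length=512)
#
#     data = load_dataset("yahma/alpaca-cleaned")["train"].train_test_split(test_size=0.1)
#     train_data = data["train"].map(model_inputs.generate_and_tokenize_prompt,
#                                    remove_columns=data["train"].column_names)
#     valid_data = data["test"].map(model_inputs.generate_and_tokenize_prompt,
#                                   remove_columns=data["test"].column_names)
#
#     # prepare_dataloader uses DistributedSampler, so run under torchrun with
#     # torch.distributed.init_process_group() already called.
#     train_dl, valid_dl = model_inputs.prepare_dataloader(train_data,
#                                                          valid_data,
#                                                          batch_size=8)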