Diff of /lit_gpt/tokenizer.py [000000] .. [248dc9]

b/lit_gpt/tokenizer.py
import json
from pathlib import Path
from typing import Optional, Union

import torch


class Tokenizer:
    def __init__(self, checkpoint_dir: Union[Path, str]) -> None:
        checkpoint_dir = Path(checkpoint_dir)
        if not checkpoint_dir.exists():
            raise NotADirectoryError(f"The checkpoint directory does not exist: {str(checkpoint_dir)}")

        self.use_bos = self.check_if_bos_token_used(checkpoint_dir)
        self.bos_id = None
        self.eos_id = None

        # some checkpoints have both files; `tokenizer.model` takes precedence
        if (vocabulary_path := checkpoint_dir / "tokenizer.model").is_file():
            from sentencepiece import SentencePieceProcessor

            self.processor = SentencePieceProcessor(model_file=str(vocabulary_path))
            self.backend = "sentencepiece"
            # sentencepiece carries the special-token ids in the model itself
            self.bos_id = self.processor.bos_id()
            self.eos_id = self.processor.eos_id()
        elif (vocabulary_path := checkpoint_dir / "tokenizer.json").is_file():
            from tokenizers import Tokenizer as HFTokenizer

            self.processor = HFTokenizer.from_file(str(vocabulary_path))
            self.backend = "huggingface"

            if (special_tokens_path := checkpoint_dir / "tokenizer_config.json").is_file():
                with open(special_tokens_path) as fp:
                    config = json.load(fp)
                bos_token = config.get("bos_token")
                self.bos_id = self.token_to_id(bos_token) if bos_token is not None else None
                eos_token = config.get("eos_token")
                self.eos_id = self.token_to_id(eos_token) if eos_token is not None else None
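            # Illustrative only: a Llama-style tokenizer_config.json typically
            # defines string entries such as {"bos_token": "<s>", "eos_token": "</s>"},
            # which the block above resolves to integer ids via token_to_id().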
            if (special_tokens_path := checkpoint_dir / "generation_config.json").is_file():
41
                with open(special_tokens_path) as fp:
42
                    config = json.load(fp)
43
                if self.bos_id is None:
44
                    self.bos_id = config.get("bos_token_id")
45
                if self.eos_id is None:
46
                    self.eos_id = config.get("eos_token_id")
47
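            # Illustrative only: a Llama-2 style generation_config.json typically
            # contains {"bos_token_id": 1, "eos_token_id": 2}.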
        else:
            raise NotImplementedError(f"No supported tokenizer file found in: {str(checkpoint_dir)}")

    @property
    def vocab_size(self) -> int:
        if self.backend == "huggingface":
            return self.processor.get_vocab_size(with_added_tokens=False)
        if self.backend == "sentencepiece":
            return self.processor.vocab_size()
        raise RuntimeError(f"Unsupported backend: {self.backend!r}")

    def token_to_id(self, token: str) -> int:
        if self.backend == "huggingface":
            id_ = self.processor.token_to_id(token)
        elif self.backend == "sentencepiece":
            id_ = self.processor.piece_to_id(token)
        else:
            raise RuntimeError(f"Unsupported backend: {self.backend!r}")
        if id_ is None:
            raise ValueError(f"token {token!r} not found in the collection.")
        return id_

    def check_if_bos_token_used(self, checkpoint_dir: Path) -> bool:
        if not (tokenizer_config_path := checkpoint_dir / "tokenizer_config.json").is_file():
            return False
        with open(tokenizer_config_path) as fp:
            config = json.load(fp)
        if any(config.get(check, False) for check in ("add_bos_token", "add_prefix_space")):
            return True
        # for checkpoints that use the Llama tokenizer but do not set `add_bos_token` at all:
        # the HF LlamaTokenizer adds BOS by default, so treat the missing key as True.
        # ex: https://huggingface.co/stabilityai/StableBeluga2/blob/main/tokenizer_config.json#L2
        return config.get("add_bos_token") is None and config.get("tokenizer_class") == "LlamaTokenizer"

    def encode(
        self,
        string: str,
        device: Optional[torch.device] = None,
        bos: Optional[bool] = None,
        eos: bool = False,
        max_length: int = -1,
    ) -> torch.Tensor:
        if self.backend == "huggingface":
            tokens = self.processor.encode(string).ids
        elif self.backend == "sentencepiece":
            tokens = self.processor.encode(string)
        else:
            raise RuntimeError(f"Unsupported backend: {self.backend!r}")
        # prepend BOS when requested explicitly, or by default when the checkpoint expects it
        if bos or (bos is None and self.use_bos):
            if self.bos_id is None:
                raise NotImplementedError("This tokenizer does not have a defined bos token")
            tokens = [self.bos_id] + tokens
        if eos:
            if self.eos_id is None:
                raise NotImplementedError("This tokenizer does not have a defined eos token")
            tokens = tokens + [self.eos_id]
        if max_length > 0:
            tokens = tokens[:max_length]
        return torch.tensor(tokens, dtype=torch.int, device=device)

    def decode(self, tensor: torch.Tensor) -> str:
        # accept both scalar (0-dim) and 1-dim tensors of token ids
        tokens = [tensor.item()] if tensor.ndim == 0 else tensor.tolist()
        return self.processor.decode(tokens)
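

if __name__ == "__main__":
    # Minimal usage sketch, not part of the module above. The checkpoint path
    # is hypothetical: point it at a directory containing either a sentencepiece
    # `tokenizer.model` or a huggingface `tokenizer.json`.
    tokenizer = Tokenizer("checkpoints/meta-llama/Llama-2-7b-hf")
    ids = tokenizer.encode("Hello, world!", eos=True)  # BOS is added automatically when use_bos is set
    print(ids)                    # 1-dim tensor of dtype torch.int32
    print(tokenizer.decode(ids))  # round-trips back to (roughly) the input text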