12import argparse
13
14import torch
15from torch import nn
16
17from labml_nn.neox.evaluation import run_eval_harness
18from labml_nn.neox.model import LayerGenerator
def main():
    """Evaluate a half-precision GPT-NeoX model on the LAMBADA task.

    Parses the ``--flash`` flag, builds the model layers on ``cuda:0`` in
    float16, wraps them in an ``nn.Sequential``, and prints the results of
    the evaluation harness.
    """
    # Command-line options
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--flash", action='store_true', help="whether to use Flash Attention")
    options = arg_parser.parse_args()

    # Run everything on the first CUDA device
    target_device = torch.device('cuda:0')

    # Build all transformer layers in fp16 on the target device
    generator = LayerGenerator(
        is_clone_layers=True,
        filter_layers=None,
        dtype=torch.float16,
        device=target_device,
        is_flash_attention=options.flash,
    )
    layer_list = [layer for layer in generator.load()]

    # Stack the layers into a single sequential model
    model = nn.Sequential(*layer_list)

    # Run the evaluation harness on LAMBADA and report the results
    print(run_eval_harness(model, 'half_precision', ['lambada'], target_device))
# Entry point: run the evaluation only when executed as a script.
if __name__ == '__main__':
    main()