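"""convert_llama_to_hf.py

Merge the per-stage checkpoints produced by decentralized pipeline-parallel
training (`prank_<i>_checkpoint.pt` files) into a single Hugging Face LLaMA
checkpoint directory.

Example invocation (paths are placeholders; pick --n-stages and
--n-layer-per-stage to match the run that produced the checkpoints):

    python convert_llama_to_hf.py \
        --config-name <hf_config_name_or_path> \
        --ckpt-path <dir_with_prank_i_checkpoint.pt> \
        --save-path <output_hf_dir> \
        --n-stages 8 \
        --n-layer-per-stage 6 \
        --fp16
"""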
import os
import argparse

import torch
import torch.nn as nn

from modules.llama_modules import LlamaForCausalLM, LlamaConfig, LlamaTokenizer
from transformers.modeling_utils import no_init_weights

def create_empty_llama(config):
    # Temporarily disable nn.Linear weight init so the (large) model can be
    # instantiated quickly; the real weights are loaded afterwards.
    _reset_parameters_linear = nn.Linear.reset_parameters
    def dummy(*args, **kwargs):
        pass
    nn.Linear.reset_parameters = dummy

    # 1. disable init for faster initialization
    # 2. avoid tying token embeddings with lm_head, as we train them separately.
    with no_init_weights(_enable=True):
        model = LlamaForCausalLM(config).eval()

    nn.Linear.reset_parameters = _reset_parameters_linear
    return model

def load_decentralized_checkpoint(model, checkpoint_path, n_stages=2, n_layer_per_stage=16):
    """Load per-stage pipeline checkpoints (`prank_<i>_checkpoint.pt`) into an HF LLaMA model."""
    input_path = checkpoint_path

    n_layers = len(model.model.layers)
    assert n_stages * n_layer_per_stage >= n_layers
    # assert model.lm_head.weight.data is not model.transformer.wte.weight.data

    for i in range(n_stages):
        print(f'loading stage {i}')
        checkpoint = torch.load(os.path.join(input_path, f'prank_{i}_checkpoint.pt'), map_location=torch.device("cpu"))

        if i == 0:
            # First stage: module "0." holds the token embeddings, followed by the first transformer layers.
            _tmp = {k[len(f"{0}."):]: v for k, v in checkpoint.items() if k.startswith(f"0.")}
            # torch.save(_tmp, os.path.join(output_path, f'pytorch_embs.pt'))
            model.model.embed_tokens.weight.data[:] = _tmp['embed_tokens.weight']

            for j in range(n_layer_per_stage):
                _tmp = {k[len(f"{j+1}."):]: v for k, v in checkpoint.items() if k.startswith(f"{j+1}.")}
                if len(_tmp) == 0:
                    break
                # torch.save(_tmp, os.path.join(output_path, f'pytorch_{j}.pt'))
                model.model.layers[j].load_state_dict(_tmp)

        elif i == n_stages - 1:
            # Last stage: the remaining transformer layers, followed by the final norm and lm_head.
            for j in range(n_layer_per_stage):
                if i * n_layer_per_stage + j == n_layers:
                    break
                _tmp = {k[len(f"{j}."):]: v for k, v in checkpoint.items() if k.startswith(f"{j}.")}
                if len(_tmp) == 0:
                    break
                # torch.save(_tmp, os.path.join(output_path, f'pytorch_{i*n_layer_per_stage + j}.pt'))
                model.model.layers[i * n_layer_per_stage + j].load_state_dict(_tmp)
            else:
                # for-else: the loop finished without break, so the norm/lm_head module
                # sits right after the last transformer layer in this checkpoint.
                j += 1

            _tmp = {k[len(f"{j}."):]: v for k, v in checkpoint.items() if k.startswith(f"{j}.")}
            if len(_tmp) == 0:
                break
            # torch.save(_tmp, os.path.join(output_path, f'pytorch_lm_head.pt'))
            model.model.norm.weight.data[:] = _tmp['norm.weight']
            if 'norm.bias' in _tmp:
                model.model.norm.bias.data[:] = _tmp['norm.bias']
            model.lm_head.weight.data[:] = _tmp['lm_head.weight']
            if 'lm_head.bias' in _tmp:
                model.lm_head.bias.data[:] = _tmp['lm_head.bias']

        else:
            # Middle stages hold only transformer layers.
            for j in range(n_layer_per_stage):
                _tmp = {k[len(f"{j}."):]: v for k, v in checkpoint.items() if k.startswith(f"{j}.")}
                if len(_tmp) == 0:
                    break
                # torch.save(_tmp, os.path.join(output_path, f'pytorch_{i*n_layer_per_stage + j}.pt'))
                model.model.layers[i * n_layer_per_stage + j].load_state_dict(_tmp)

    return model

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert decentralized LLaMA checkpoints to HF format')
    parser.add_argument('--config-name', type=str, default='EleutherAI/gpt-neox-20b',
                        help='config-name')
    parser.add_argument('--ckpt-path', type=str, default=None,
                        help='ckpt-path')
    parser.add_argument('--save-path', type=str, default=None,
                        help='save-path')
    parser.add_argument('--n-stages', type=int, default=8,
                        help='pipeline group size')
    parser.add_argument('--n-layer-per-stage', type=int, default=6,
                        help='n layers per GPU device')
    parser.add_argument('--fp16', default=False, action='store_true')
    args = parser.parse_args()

    assert args.ckpt_path is not None
    assert args.save_path is not None

    if not os.path.exists(args.save_path):
        os.mkdir(args.save_path)

    # LlamaForCausalLM LlamaConfig LlamaTokenizer
    print('loading config...')
    config = LlamaConfig.from_pretrained(args.config_name)
    print('loaded config.')

    print('loading tokenizer...')
    tokenizer = LlamaTokenizer.from_pretrained(args.config_name)
    print('loaded tokenizer.')

    print('creating empty model...')
    model = create_empty_llama(config)
    if args.fp16:
        model = model.half()
    print('created empty model.')

    print('loading model ckpt...')
    load_decentralized_checkpoint(
        model, args.ckpt_path, n_stages=args.n_stages, n_layer_per_stage=args.n_layer_per_stage,
    )
    print('loaded model ckpt.')

    print('saving HF model...')
    model.save_pretrained(args.save_path)
    print(f'saved HF model to `{args.save_path}`')
    config.save_pretrained(args.save_path)
    tokenizer.save_pretrained(args.save_path)
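
    # The saved directory can then be loaded like any Hugging Face checkpoint.
    # A minimal sketch (assumes the standard `transformers` LLaMA classes are
    # available in your installed version):
    #
    #   from transformers import LlamaForCausalLM, LlamaTokenizer
    #   model = LlamaForCausalLM.from_pretrained(args.save_path)
    #   tokenizer = LlamaTokenizer.from_pretrained(args.save_path)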