# Finetune-meta-Llama-2-7b-hf.py
import os
import torch
import torch.nn as nn
import bitsandbytes as bnb
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import BitsAndBytesConfig
from peft import LoraConfig, get_peft_model
from datasets import load_dataset

# Set up the tokenizer and base model, loading the weights in 8-bit to reduce GPU memory use
model_id = "meta-llama/Llama-2-7b-hf"
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token  # Llama-2 has no pad token; reuse EOS so batches can be padded
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
print(model.get_memory_footprint())
# Freeze the original weights; keep 1-D params (e.g. layer norms) in fp32 for training stability
for param in model.parameters():
    param.requires_grad = False
    if param.ndim == 1:
        param.data = param.data.to(torch.float32)

model.gradient_checkpointing_enable()   # trade compute for memory during the backward pass
model.enable_input_require_grads()      # required so gradients reach the adapters with checkpointing enabled

class CastOutputToFloat(nn.Sequential):
    def forward(self, x):
        return super().forward(x).to(torch.float32)

model.lm_head = CastOutputToFloat(model.lm_head)
# Setting up the LoRA adapters
def print_trainable_parameters(model):
    """Print the number of trainable parameters in the model."""
    trainable_params = 0
    all_param = 0
    for _, param in model.named_parameters():
        all_param += param.numel()
        if param.requires_grad:
            trainable_params += param.numel()
    print(
        f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}"
    )

config = LoraConfig(
    r=16,            # rank of the low-rank update matrices
    lora_alpha=32,   # scaling factor applied to the LoRA updates
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
print_trainable_parameters(model)
# Load the dataset and merge quote and tags into a single text field for training
data = load_dataset("Abirate/english_quotes")

def merge_columns(example):
    example["prediction"] = example["quote"] + " ->: " + str(example["tags"])
    return example

data["train"] = data["train"].map(merge_columns)
print(data["train"]["prediction"][:5])
print(data["train"][0])

# Tokenize the merged text column
data = data.map(lambda samples: tokenizer(samples["prediction"]), batched=True)
print(data)
# Training
trainer = transformers.Trainer(
    model=model,
    train_dataset=data["train"],
    args=transformers.TrainingArguments(
        per_device_train_batch_size=4,
        gradient_accumulation_steps=4,
        warmup_steps=100,
        max_steps=200,
        learning_rate=2e-4,
        fp16=True,
        logging_steps=1,
        output_dir="outputs",
    ),
    data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
model.config.use_cache = False  # caching is incompatible with gradient checkpointing; re-enable for inference
trainer.train()

# Push the trained LoRA adapter to the Hugging Face Hub
# (replace "HuggingFace-app-key" with a real access token)
model.push_to_hub(
    "meetrais/meta-Llama-2-7b-hf-finetuned",
    token="HuggingFace-app-key",
    commit_message="basic training",
    private=True,
)
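
# --- Optional: inference sketch (not part of the original script) ---
# A minimal, hedged example of how the pushed LoRA adapter could be loaded back
# for generation. It assumes the adapter repo name used above, access to the
# gated Llama-2 base weights, and is best run in a fresh session so the base
# model is not held in memory twice. The prompt string is illustrative only.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "meta-llama/Llama-2-7b-hf"
adapter_id = "meetrais/meta-Llama-2-7b-hf-finetuned"  # repo pushed above

tok = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(
    base_id,
    device_map="auto",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
finetuned = PeftModel.from_pretrained(base, adapter_id)
finetuned.eval()

prompt = "Be yourself; everyone else is already taken. ->: "
inputs = tok(prompt, return_tensors="pt").to(finetuned.device)
with torch.no_grad():
    out = finetuned.generate(**inputs, max_new_tokens=32)
print(tok.decode(out[0], skip_special_tokens=True))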