Each record in the YelpReviewFull dataset pairs a 0-indexed star-rating label (0-4) with the review text, for example:

{'label': 0, 'text': "I got 'new' tires from them and within two weeks got a flat..."}
from datasets import load_dataset

# Download the YelpReviewFull dataset
dataset = load_dataset("yelp_review_full")
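To see where the example record above comes from, you can inspect an arbitrary training sample (a minimal sketch; the index 100 is just an illustration):

# Inspect one training sample; the index is arbitrary
print(dataset["train"][100])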
from transformers import AutoTokenizer

# Load the pretrained BERT tokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

def tokenize_function(examples):
    """
    Encode the text with the tokenizer, padding and truncating to max length
    """
    return tokenizer(examples["text"], padding="max_length", truncation=True)
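As a quick sanity check (a sketch; the sample text is made up), calling the function on a single example shows the fields it produces:

# Hypothetical single-example check: the tokenizer returns input_ids,
# token_type_ids and attention_mask, each padded to the model max length
sample = tokenize_function({"text": ["The food was great!"]})
print(list(sample.keys()))          # ['input_ids', 'token_type_ids', 'attention_mask']
print(len(sample["input_ids"][0]))  # 512 for bert-base-cased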
# Preprocess the dataset
tokenized_datasets = dataset.map(tokenize_function, batched=True)

# Sample 1,000 training examples
small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000))
# Sample 1,000 test examples
small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))
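A brief check (sketch) confirms the subsets have the expected size and carry the tokenized columns alongside the original fields:

print(len(small_train_dataset))          # 1000
print(small_train_dataset.column_names)  # includes 'input_ids' and 'attention_mask'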
from transformers import AutoModelForSequenceClassification

# Load BERT with a 5-way classification head (star ratings 1-5 map to labels 0-4)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)
from transformers import TrainingArguments

model_dir = "models/bert-base-cased-finetune-yelp"

# Configure training arguments
training_args = TrainingArguments(
    output_dir=model_dir,            # where checkpoints are saved
    per_device_train_batch_size=16,  # training batch size per device
    num_train_epochs=5,              # number of training epochs
    logging_steps=100                # log every 100 steps
)
import numpy as np
import evaluate

# Load the accuracy metric
metric = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    """
    Compute accuracy from the model's predictions
    """
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)  # convert logits to class predictions
    return metric.compute(predictions=predictions, references=labels)
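You can exercise compute_metrics before training with made-up values (a sketch with fabricated logits, not real model output):

# Fabricated 3-example batch: row-wise argmax gives predictions [1, 0, 4]
dummy_logits = np.array([[0.1, 2.0, 0.3, 0.1, 0.2],
                         [3.0, 0.1, 0.2, 0.1, 0.1],
                         [0.1, 0.1, 0.2, 0.1, 2.5]])
dummy_labels = np.array([1, 0, 3])
print(compute_metrics((dummy_logits, dummy_labels)))  # {'accuracy': 0.666...}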
from transformers import Trainer

# Instantiate the Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=small_train_dataset,  # training dataset
    eval_dataset=small_eval_dataset,    # evaluation dataset
    compute_metrics=compute_metrics     # metric computation function
)
# Update the training configuration
training_args = TrainingArguments(
    output_dir=model_dir,
    evaluation_strategy="epoch",     # evaluate at the end of each epoch
    per_device_train_batch_size=16,
    num_train_epochs=3,
    logging_steps=30                 # log every 30 steps
)
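Note that the arguments are fixed into the Trainer at construction time, so the updated settings only take effect if the Trainer is re-instantiated with them:

# Re-instantiate the Trainer so it picks up the updated arguments
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=small_train_dataset,
    eval_dataset=small_eval_dataset,
    compute_metrics=compute_metrics
)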
# Start training
trainer.train()
During training, use the nvidia-smi command to monitor GPU utilization and confirm the run is making efficient use of the hardware:
watch -n 1 nvidia-smi
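Alternatively (a sketch, assuming a CUDA device is available), GPU memory usage can be checked from Python via torch.cuda:

import torch

if torch.cuda.is_available():
    # Memory currently allocated by tensors, in GiB
    print(f"allocated: {torch.cuda.memory_allocated() / 1024**3:.2f} GiB")
    # Total memory reserved by the caching allocator, in GiB
    print(f"reserved:  {torch.cuda.memory_reserved() / 1024**3:.2f} GiB")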
# Save the fine-tuned model
trainer.save_model(model_dir)
# Save the trainer state
trainer.save_state()
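The saved model can later be reloaded for inference (a minimal sketch; the review text is made up, and the tokenizer is reloaded from the hub since it was not passed to the Trainer):

from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Reload the fine-tuned model and the matching tokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = AutoModelForSequenceClassification.from_pretrained(model_dir)

inputs = tokenizer("The service was slow but the food was amazing.",
                   return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item() + 1)  # predicted star rating, 1-5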
Putting the whole pipeline together, the complete fine-tuning script:

# Import required libraries
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
import numpy as np
import evaluate

# Download the dataset
dataset = load_dataset("yelp_review_full")

# Preprocess the data
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

def tokenize_function(examples):
    """
    Encode the text with the tokenizer, padding and truncating to max length
    """
    return tokenizer(examples["text"], padding="max_length", truncation=True)

tokenized_datasets = dataset.map(tokenize_function, batched=True)

# Sample subsets
small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000))
small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))

# Load the model and configure training
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)
model_dir = "models/bert-base-cased-finetune-yelp"
training_args = TrainingArguments(
    output_dir=model_dir,
    evaluation_strategy="epoch",     # evaluate each epoch, matching the updated configuration above
    per_device_train_batch_size=16,
    num_train_epochs=3,
    logging_steps=30
)

# Metric evaluation
metric = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    """
    Compute accuracy from the model's predictions
    """
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return metric.compute(predictions=predictions, references=labels)

# Instantiate the Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=small_train_dataset,
    eval_dataset=small_eval_dataset,
    compute_metrics=compute_metrics
)

# Start training
trainer.train()

# Monitor GPU usage from a shell: watch -n 1 nvidia-smi

# Save the model and trainer state
trainer.save_model(model_dir)
trainer.save_state()
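After training, held-out accuracy can be reported explicitly (a sketch using the Trainer API):

# Evaluate on the held-out subset; returns eval_loss, eval_accuracy, etc.
results = trainer.evaluate(small_eval_dataset)
print(results)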
8. Summary

This walkthrough fine-tuned bert-base-cased on a 1,000-example subset of YelpReviewFull with the Hugging Face Trainer API: the dataset was tokenized with padding and truncation, accuracy was evaluated at the end of each epoch, and the resulting model and trainer state were saved for later reuse.