A deep-dive into instructor Jiutian's public lecture: efficiently deploying the DeepSeek R1 model with 60 GB of RAM and 14 GB of VRAM.
Key topics:
1. A comparison of the KTransformers and Unsloth approaches
2. Dynamic quantization techniques and model performance testing
3. A hands-on deployment walkthrough and performance-tuning strategies
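Before downloading anything, it is worth confirming that the machine actually meets the roughly 60 GB RAM + 14 GB VRAM floor quoted above; a quick check with the standard Linux/NVIDIA tools:
free -h
nvidia-smi
With resources confirmed, start a persistent terminal session so the long downloads and the build survive an SSH disconnect; screen -S kt creates the session and screen -r kt re-attaches to it later: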
screen -S kt
screen -r kt
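Install the ModelScope CLI and pull the Unsloth dynamic-quantization weights; UD-IQ1_S is Unsloth's 1.58-bit dynamic quant of R1. The paths below assume the working directory is /root/autodl-tmp (an AutoDL data disk):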
pip install modelscope
mkdir ./DeepSeek-R1-GGUF
modelscope download --model unsloth/DeepSeek-R1-GGUF --include '**UD-IQ1_S**' --local_dir /root/autodl-tmp/DeepSeek-R1-GGUF
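KTransformers also needs the original repo's configuration and tokenizer files, so fetch everything from deepseek-ai/DeepSeek-R1 except the weight shards: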
mkdir ./DeepSeek-R1
modelscope download --model deepseek-ai/DeepSeek-R1 --exclude '*.safetensors' --local_dir /root/autodl-tmp/DeepSeek-R1
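Next, create an isolated Python 3.11 environment and install the system-level build toolchain: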
conda create --name kt python=3.11
conda init
source ~/.bashrc
conda activate kt
sudo apt-get update
sudo apt-get install gcc g++ cmake ninja-build
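Then install the Python-side dependencies; note that pip install flash-attn may build from source against the installed torch, which can take a while: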
pip install torch packaging ninja cpufeature numpy
pip install flash-attn
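KTransformers' compiled extensions require a recent libstdc++ (GLIBCXX symbols), so upgrade it both at the system level and inside the conda environment; pinning conda-libmamba-solver avoids solver errors seen with some newer versions: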
sudo apt-get install --only-upgrade libstdc++6
conda install -c conda-forge libstdcxx-ng
conda install conda-libmamba-solver=24.11.0
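Unpack the offline KTransformers source package and build it. USE_NUMA=1 enables the NUMA-aware build for dual-socket CPU servers, and pip show confirms the installation succeeded: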
tar -xzvf ktransformers_offline.tar.gz
cd ktransformers
export USE_NUMA=1
sh ./install.sh
pip show ktransformers
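Run a quick functional test with the bundled CLI chat script; --force_think true forces the model to emit its <think> reasoning block: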
python ./ktransformers/local_chat.py --model_path /root/autodl-tmp/DeepSeek-R1 --gguf_path /root/autodl-tmp/DeepSeek-R1-GGUF/DeepSeek-R1-UD-IQ1_S --max_new_tokens 2048 --force_think true
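For service deployment, the same weights can instead be exposed as an OpenAI-compatible API: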
ktransformers --model_path /root/autodl-tmp/DeepSeek-R1 --gguf_path /root/autodl-tmp/DeepSeek-R1-GGUF/DeepSeek-R1-UD-IQ1_S --port 10002
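Once the server is up, test the endpoint with curl: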
curl -X POST \
  'http://localhost:10002/v1/chat/completions' \
  -H 'accept: application/json' \
  -H 'Content-Type: application/json' \
  -d '{
        "messages": [
          {
            "content": "Hello there!",
            "role": "user"
          }
        ],
        "model": "DeepSeek-R1",
        "stream": false
      }'
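The endpoint also works with the official openai Python SDK. The local server does not validate API keys, so any placeholder value will do: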
pip install openai
from openai import OpenAI

ds_api_key = "none"

# Instantiate the client (the local server does not validate the key)
client = OpenAI(api_key=ds_api_key,
                base_url="http://localhost:10002/v1")

# Call the DeepSeek R1 model
response = client.chat.completions.create(
    model="DeepSeek-R1",
    messages=[
        {"role": "user", "content": "Hi, long time no see! Please introduce yourself."}
    ]
)
response
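R1 wraps its chain of thought in <think>...</think> tags; a regular expression splits the reasoning out of the reply: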
import re

# The raw reply text
text = response.choices[0].message.content

# Extract the content between <think> and </think>
think_content = re.search(r'<think>(.*?)</think>', text, re.DOTALL)

# The extracted reasoning, or None if no think block is present
think_content_text = think_content.group(1) if think_content else None
think_content_text
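Finally, wire the service into a front end. Open WebUI can run fully offline; HF_HUB_OFFLINE=1 stops it from contacting Hugging Face at startup: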
pip install open-webui
export HF_HUB_OFFLINE=1
open-webui serve
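To render R1's reasoning and web-search citations cleanly inside Open WebUI, register the following pipe function (added under Admin Panel → Functions in Open WebUI). It streams DeepSeek responses, converts reasoning_content deltas into <think> blocks, and rewrites the citation markers returned by Volcano Engine, Perplexity (PPLX), and SiliconFlow web-search modes: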
import json
import httpx
import re
from typing import AsyncGenerator, Callable, Awaitable
from pydantic import BaseModel, Field
import asyncio
import traceback
class Pipe:
    class Valves(BaseModel):
        DEEPSEEK_API_BASE_URL: str = Field(
            default="https://api.deepseek.com/v1",
            description="Base URL for DeepSeek API requests",
        )
        DEEPSEEK_API_KEY: str = Field(
            default="",
            description="DeepSeek API key used for authentication; available from the console",
        )
        DEEPSEEK_API_MODEL: str = Field(
            default="deepseek-reasoner",
            description="Model name(s) for API requests; defaults to deepseek-reasoner, separate multiple names with `,`",
        )

    def __init__(self):
        self.valves = self.Valves()
        self.data_prefix = "data:"
        self.emitter = None

    def pipes(self):
        models = self.valves.DEEPSEEK_API_MODEL.split(",")
        return [
            {
                "id": model.strip(),
                "name": model.strip(),
            }
            for model in models
        ]

    async def pipe(
        self, body: dict, __event_emitter__: Callable[[dict], Awaitable[None]] = None
    ) -> AsyncGenerator[str, None]:
        """Main processing pipeline (buffering removed)."""
        thinking_state = {"thinking": -1}  # tracks the <think> state machine
        self.emitter = __event_emitter__
        # Reference list returned in web-search mode
        stored_references = []
        # Search provider: 0 - none, 1 - Volcano Engine, 2 - PPLX, 3 - SiliconFlow
        search_providers = 0
        waiting_for_reference = False
        # Character stack used to parse SiliconFlow's [citation:1] markers
        citation_stack_reference = [
            "[",
            "c",
            "i",
            "t",
            "a",
            "t",
            "i",
            "o",
            "n",
            ":",
            "",
            "]",
        ]
        citation_stack = []
        # Characters held back until they can be processed
        unprocessed_content = ""
        # Validate the configuration
        if not self.valves.DEEPSEEK_API_KEY:
            yield json.dumps({"error": "API key not configured"}, ensure_ascii=False)
            return
        # Prepare the request headers
        headers = {
            "Authorization": f"Bearer {self.valves.DEEPSEEK_API_KEY}",
            "Content-Type": "application/json",
        }
        try:
            # Extract the model ID
            model_id = body["model"].split(".", 1)[-1]
            payload = {**body, "model": model_id}
            # Rewrite the history so no two consecutive messages share a role
            messages = payload["messages"]
            i = 0
            while i < len(messages) - 1:
                if messages[i]["role"] == messages[i + 1]["role"]:
                    # Insert a placeholder message with the alternate role
                    alternate_role = (
                        "assistant" if messages[i]["role"] == "user" else "user"
                    )
                    messages.insert(
                        i + 1,
                        {"role": alternate_role, "content": "[Unfinished thinking]"},
                    )
                i += 1
            # Issue the API request
            async with httpx.AsyncClient(http2=True) as client:
                async with client.stream(
                    "POST",
                    f"{self.valves.DEEPSEEK_API_BASE_URL}/chat/completions",
                    json=payload,
                    headers=headers,
                    timeout=300,
                ) as response:
                    # Error handling
                    if response.status_code != 200:
                        error = await response.aread()
                        yield self._format_error(response.status_code, error)
                        return
                    # Stream the response
                    async for line in response.aiter_lines():
                        if not line.startswith(self.data_prefix):
                            continue
                        # Slice out the JSON payload
                        json_str = line[len(self.data_prefix) :].strip()
                        # Check for the end-of-stream marker
                        if json_str == "[DONE]":
                            return
                        try:
                            data = json.loads(json_str)
                        except json.JSONDecodeError as e:
                            error_detail = f"Parse failed - content: {json_str}, reason: {e}"
                            yield self._format_error("JSONDecodeError", error_detail)
                            return
                        if search_providers == 0:
                            # Look for search results in the delta
                            choices = data.get("choices")
                            if not choices or len(choices) == 0:
                                continue  # skip chunks without choices
                            delta = choices[0].get("delta", {})
                            if delta.get("type") == "search_result":
                                search_results = delta.get("search_results", [])
                                if search_results:
                                    ref_count = len(search_results)
                                    yield '<details type="search">\n'
                                    yield f"<summary>Searched {ref_count} websites</summary>\n"
                                    for idx, result in enumerate(search_results, 1):
                                        yield f'> {idx}. [{result["title"]}]({result["url"]})\n'
                                    yield "</details>\n"
                                    search_providers = 3
                                    stored_references = search_results
                                    continue
                            # Collect references
                            stored_references = data.get("references", []) + data.get(
                                "citations", []
                            )
                            if stored_references:
                                ref_count = len(stored_references)
                                yield '<details type="search">\n'
                                yield f"<summary>Searched {ref_count} websites</summary>\n"
                                # "references" in the data indicates a Volcano Engine response
                                if data.get("references"):
                                    for idx, reference in enumerate(
                                        stored_references, 1
                                    ):
                                        yield f'> {idx}. [{reference["title"]}]({reference["url"]})\n'
                                    yield "</details>\n"
                                    search_providers = 1
                                # "citations" in the data indicates a PPLX response
                                elif data.get("citations"):
                                    for idx, reference in enumerate(
                                        stored_references, 1
                                    ):
                                        yield f"> {idx}. {reference}\n"
                                    yield "</details>\n"
                                    search_providers = 2
                        # Check that choices exists and is non-empty
                        choices = data.get("choices")
                        if not choices or len(choices) == 0:
                            continue  # skip chunks without choices
                        choice = choices[0]
                        # Termination condition
                        if choice.get("finish_reason"):
                            return
                        # Drive the thinking state machine
                        state_output = await self._update_thinking_state(
                            choice.get("delta", {}), thinking_state
                        )
                        if state_output:
                            yield state_output
                            if state_output == "<think>":
                                yield "\n"
                        # Process and immediately emit the content
                        content = self._process_content(choice["delta"])
                        if content:
                            # Handle inline thinking markers
                            if content.startswith("<think>"):
                                content = re.sub(r"^<think>", "", content)
                                yield "<think>"
                                await asyncio.sleep(0.1)
                                yield "\n"
                            elif content.startswith("</think>"):
                                content = re.sub(r"^</think>", "", content)
                                yield "</think>"
                                await asyncio.sleep(0.1)
                                yield "\n"
                            # Rewrite reference markers
                            if search_providers == 1:
                                # Volcano Engine: reference numbers follow the literal
                                # marker "摘要" ("summary"), so wait for it first
                                if "摘要" in content:
                                    waiting_for_reference = True
                                    yield content
                                    continue
                                # Waiting for the reference numbers
                                if waiting_for_reference:
                                    # Content consisting only of digits or "、"
                                    if re.match(r"^(\d+|、)$", content.strip()):
                                        numbers = re.findall(r"\d+", content)
                                        if numbers:
                                            num = numbers[0]
                                            ref_index = int(num) - 1
                                            if 0 <= ref_index < len(stored_references):
                                                ref_url = stored_references[ref_index][
                                                    "url"
                                                ]
                                            else:
                                                ref_url = ""
                                            content = f"[[{num}]]({ref_url})"
                                        # Stay in the waiting state for further numbers
                                    # Anything else without "摘要" ends the wait
                                    elif "摘要" not in content:
                                        waiting_for_reference = False
                            elif search_providers == 2:
                                # PPLX: rewrite [n] markers into linked references
                                def replace_ref(m):
                                    idx = int(m.group(1)) - 1
                                    if 0 <= idx < len(stored_references):
                                        return f"[[{m.group(1)}]]({stored_references[idx]})"
                                    return f"[[{m.group(1)}]]()"

                                content = re.sub(r"\[(\d+)\]", replace_ref, content)
                            elif search_providers == 3:
                                # SiliconFlow: match [citation:n] character by character
                                skip_outer = False
                                if len(unprocessed_content) > 0:
                                    content = unprocessed_content + content
                                    unprocessed_content = ""
                                for i in range(len(content)):
                                    # Guard: content may shrink inside the loop
                                    if i >= len(content):
                                        break
                                    # Guard: the stack may already be full
                                    if len(citation_stack) >= len(
                                        citation_stack_reference
                                    ):
                                        break
                                    if (
                                        content[i]
                                        == citation_stack_reference[len(citation_stack)]
                                    ):
                                        citation_stack.append(content[i])
                                        # A complete match: rewrite it in URL format
                                        if len(citation_stack) == len(
                                            citation_stack_reference
                                        ):
                                            # citation_stack[10] holds the number
                                            if len(citation_stack) > 10:
                                                ref_index = int(citation_stack[10]) - 1
                                                if (
                                                    0
                                                    <= ref_index
                                                    < len(stored_references)
                                                ):
                                                    ref_url = stored_references[
                                                        ref_index
                                                    ]["url"]
                                                else:
                                                    ref_url = ""
                                                # Hold back the rest of content
                                                unprocessed_content = "".join(
                                                    content[i + 1 :]
                                                )
                                                content = f"[[{citation_stack[10]}]]({ref_url})"
                                                citation_stack = []
                                                skip_outer = False
                                                break
                                        else:
                                            skip_outer = True
                                    elif (
                                        citation_stack_reference[len(citation_stack)]
                                        == ""
                                    ):
                                        # The "" slot accepts digits
                                        if content[i].isdigit():
                                            citation_stack.append(content[i])
                                            skip_outer = True
                                        else:
                                            # Not a citation: flush the stack back
                                            content = "".join(citation_stack) + content
                                            citation_stack = []
                                    elif (
                                        citation_stack_reference[len(citation_stack)]
                                        == "]"
                                    ):
                                        # Multi-digit numbers extend the previous slot
                                        if citation_stack[-1].isdigit():
                                            citation_stack[-1] += content[i]
                                            skip_outer = True
                                        else:
                                            content = "".join(citation_stack) + content
                                            citation_stack = []
                                    else:
                                        if len(citation_stack) > 0:
                                            # Not a citation: flush the stack back
                                            content = "".join(citation_stack) + content
                                            citation_stack = []
                                if skip_outer:
                                    continue
                            yield content
        except Exception as e:
            yield self._format_exception(e)

    async def _update_thinking_state(self, delta: dict, thinking_state: dict) -> str:
        """Update the thinking state machine (simplified)."""
        state_output = ""
        if thinking_state["thinking"] == -1 and delta.get("reasoning_content"):
            thinking_state["thinking"] = 0
            state_output = "<think>"
        elif (
            thinking_state["thinking"] == 0
            and not delta.get("reasoning_content")
            and delta.get("content")
        ):
            thinking_state["thinking"] = 1
            state_output = "\n</think>\n\n"
        return state_output

    def _process_content(self, delta: dict) -> str:
        """Return the delta's reasoning or answer content directly."""
        return delta.get("reasoning_content", "") or delta.get("content", "")

    def _emit_status(self, description: str, done: bool = False) -> Awaitable[None]:
        """Send a status update to the UI."""
        if self.emitter:
            return self.emitter(
                {
                    "type": "status",
                    "data": {
                        "description": description,
                        "done": done,
                    },
                }
            )
        return None

    def _format_error(self, status_code: int, error: bytes) -> str:
        if isinstance(error, str):
            error_str = error
        else:
            error_str = error.decode(errors="ignore")
        try:
            err_msg = json.loads(error_str).get("message", error_str)[:200]
        except Exception:
            err_msg = error_str[:200]
        return json.dumps(
            {"error": f"HTTP {status_code}: {err_msg}"}, ensure_ascii=False
        )

    def _format_exception(self, e: Exception) -> str:
        tb_lines = traceback.format_exception(type(e), e, e.__traceback__)
        detailed_error = "".join(tb_lines)
        return json.dumps({"error": detailed_error}, ensure_ascii=False)