Dify plugin Endpoints: a new Serverless era with custom API logic and personalized WebApps. Key points: 1. Endpoint plugins: a brand-new extension type in Dify v1.0 that supports custom API logic. 2. Serverless: an Endpoint acts as an HTTP server and supports reverse invocation of Dify's capabilities. 3. WebApp templates: Endpoints enable styled customization for building varied Chatbots.
Quick start: https://docs.dify.ai/zh-hans/plugins/quick-start
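Before the full example, here is a minimal sketch of what an Endpoint plugin looks like. The class name and greeting below are hypothetical, but the `_invoke` signature matches the real examples later in this article:

from collections.abc import Mapping
from werkzeug import Request, Response
from dify_plugin import Endpoint

class HelloEndpoint(Endpoint):
    def _invoke(self, r: Request, values: Mapping, settings: Mapping) -> Response:
        # _invoke receives the raw HTTP request, any path variables (values),
        # and the plugin's configured settings, and returns a werkzeug Response.
        return Response("Hello from a Dify Endpoint!", status=200, content_type="text/plain")

The first full example is a pink-styled Chatbot WebApp. Its page template: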
<html lang="zh">
<head>
  <meta charset="utf-8" />
  <title>{{ bot_name }}</title>
</head>
<body>
  <!-- Header title, displaying the ChatBot name -->
  <header>
    <h1>{{ bot_name }}</h1>
  </header>
  <div class="chat-container">
    <div id="chat-log"></div>
    <div class="input-container">
      <input type="text" id="user-input" placeholder="Press Enter or click Send after typing" />
      <button id="send-btn">Send</button>
      <!-- "Reset Conversation" button -->
      <button id="reset-btn">Reset</button>
    </div>
  </div>
  <script>
    // The bot name is injected server-side
    const botName = '{{ bot_name }}';
    // Read the conversation ID from localStorage (empty for a new conversation) to support multi-turn dialogue
    let conversationId = localStorage.getItem('conversation_id') || '';
    // Get page elements
    const chatLog = document.getElementById('chat-log');
    const userInput = document.getElementById('user-input');
    const sendBtn = document.getElementById('send-btn');
    const resetBtn = document.getElementById('reset-btn');
    // Bind events to the buttons and input
    sendBtn.addEventListener('click', sendMessage);
    userInput.addEventListener('keypress', function (event) {
      // Send the message when the Enter key is pressed
      if (event.key === 'Enter') {
        sendMessage();
      }
    });
    // Click the reset button
    resetBtn.addEventListener('click', resetConversation);

    /**
     * Send a message to the backend and handle the streaming response
     */
    async function sendMessage() {
      const message = userInput.value.trim();
      if (!message) return;
      // Display the user message in the chat log
      appendMessage(message, 'user');
      userInput.value = '';
      // Prepare the request body
      const requestBody = {
        query: message,
        conversation_id: conversationId
      };
      try {
        // The backend streaming API endpoint (relative to the page URL)
        const response = await fetch('./pink/talk', {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json'
          },
          body: JSON.stringify(requestBody)
        });
        if (!response.ok) {
          throw new Error('Network response was not ok');
        }
        // Create a placeholder for displaying the ChatBot reply
        let botMessageContainer = appendMessage('', 'bot');
        // Read the backend response as a stream
        const reader = response.body.getReader();
        const decoder = new TextDecoder('utf-8');
        let buffer = '';
        while (true) {
          const { value, done } = await reader.read();
          if (done) break;
          buffer += decoder.decode(value, { stream: true });
          // Chunks are JSON objects separated by blank lines; process each complete chunk
          const lines = buffer.split('\n\n');
          buffer = lines.pop() || ''; // Keep the last, possibly incomplete chunk
          for (const line of lines) {
            if (!line.trim()) continue;
            try {
              const data = JSON.parse(line);
              if (data.answer) {
                botMessageContainer.textContent += data.answer;
              }
              if (data.conversation_id) {
                conversationId = data.conversation_id;
                localStorage.setItem('conversation_id', conversationId);
              }
            } catch (error) {
              console.error('Error:', error, line);
            }
          }
        }
      } catch (error) {
        console.error('Error:', error);
        appendMessage('Request failed, please try again later.', 'bot');
      }
    }

    /**
     * Insert a message into the chat log
     * @param {string} text - Message content
     * @param {string} sender - 'user' or 'bot'
     * @returns {HTMLElement} - The inserted message element, for later content updates
     */
    function appendMessage(text, sender) {
      const messageEl = document.createElement('div');
      messageEl.className = `message ${sender}`;
      // For the bot, display "Bot Name: Message"; otherwise display the user message as-is
      if (sender === 'bot') {
        messageEl.textContent = botName + ': ' + text;
      } else {
        messageEl.textContent = text;
      }
      chatLog.appendChild(messageEl);
      // Scroll the chat log to the bottom
      chatLog.scrollTop = chatLog.scrollHeight;
      return messageEl;
    }

    /**
     * Reset the conversation: clear conversation_id and the chat log
     */
    function resetConversation() {
      // Remove the conversation ID from local storage
      localStorage.removeItem('conversation_id');
      conversationId = '';
      // Clear the chat log
      chatLog.innerHTML = '';
    }
  </script>
</body>
</html>
First, implement an Endpoint that renders this HTML template, filling in the configured bot name:

from collections.abc import Mapping
import os

from werkzeug import Request, Response
from dify_plugin import Endpoint

class NekoEndpoint(Endpoint):
    def _invoke(self, r: Request, values: Mapping, settings: Mapping) -> Response:
        # Read girls.html relative to this Python file
        with open(os.path.join(os.path.dirname(__file__), "girls.html"), "r") as f:
            return Response(
                f.read().replace("{{ bot_name }}", settings.get("bot_name", "Candy")),
                status=200,
                content_type="text/html",
            )
Then implement an Endpoint that calls the Dify app's chat API:
from collections.abc import Mapping
import json
from typing import Optional

from werkzeug import Request, Response
from dify_plugin import Endpoint

class GirlsTalk(Endpoint):
    def _invoke(self, r: Request, values: Mapping, settings: Mapping) -> Response:
        """
        Invokes the endpoint with the given request.
        """
        app: Optional[dict] = settings.get("app")
        if not app:
            return Response("App is required", status=400)
        data = r.get_json()
        query = data.get("query")
        conversation_id = data.get("conversation_id")
        if not query:
            return Response("Query is required", status=400)

        def generator():
            # Reverse-invoke the selected Dify app in streaming mode
            response = self.session.app.chat.invoke(
                app_id=app.get("app_id"),
                query=query,
                inputs={},
                conversation_id=conversation_id,
                response_mode="streaming",
            )
            # Emit each chunk as a JSON object followed by a blank line,
            # matching what the frontend parser expects
            for chunk in response:
                yield json.dumps(chunk) + "\n\n"

        return Response(generator(), status=200, content_type="text/event-stream")
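These two handlers also need to be declared as routes in the plugin's endpoint group. A minimal sketch, where all file names are assumptions and the paths mirror the './pink/talk' URL the frontend fetches; the path/method/extra layout follows Dify's endpoint declaration format:

# group file (hypothetical name): lists the endpoint declarations
endpoints:
  - endpoints/pink.yaml
  - endpoints/pink_talk.yaml

# endpoints/pink.yaml: serves the chat page
path: "/pink"
method: "GET"
extra:
  python:
    source: "endpoints/pink.py"

# endpoints/pink_talk.yaml: streams chat replies
path: "/pink/talk"
method: "POST"
extra:
  python:
    source: "endpoints/pink_talk.py"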
Once everything is in place, open the corresponding Endpoint URL and you will see the page.
The second example exposes Dify models through an OpenAI-compatible API. Its group settings declare an API key and two optional model selectors, and register two endpoint files:

settings:
  - name: api_key
    type: secret-input
    required: true
    label:
      en_US: API key
      zh_Hans: API key
      pt_BR: API key
    placeholder:
      en_US: Please input your API key
      zh_Hans: 请输入你的 API key
      pt_BR: Please input your API key
  - name: llm
    type: model-selector
    scope: llm
    required: false
    label:
      en_US: LLM
      zh_Hans: LLM
      pt_BR: LLM
    placeholder:
      en_US: Please select a LLM
      zh_Hans: 请选择一个 LLM
      pt_BR: Please select a LLM
  - name: text_embedding
    type: model-selector
    scope: text-embedding
    required: false
    label:
      en_US: Text Embedding
      zh_Hans: 文本嵌入
      pt_BR: Text Embedding
    placeholder:
      en_US: Please select a Text Embedding Model
      zh_Hans: 请选择一个文本嵌入模型
      pt_BR: Please select a Text Embedding Model
endpoints:
  - endpoints/llm.yaml
  - endpoints/text_embedding.yaml
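At runtime, the llm model-selector setting arrives in `settings` as a dict that can be unpacked into `LLMModelConfig`. A hypothetical value, with the provider and model names chosen purely for illustration:

llm = {
    "provider": "openai",      # model provider configured in Dify (assumed example)
    "model": "gpt-4o-mini",    # model name within that provider (assumed example)
    "mode": "chat",            # chat or completion
    "completion_params": {},   # temperature, max_tokens, etc.
}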
from collections.abc import Mapping
import json
import time
import uuid
from typing import Optional

from werkzeug import Request, Response
from dify_plugin import Endpoint
from dify_plugin.entities.model.llm import LLMModelConfig
from dify_plugin.entities.model.message import PromptMessage, PromptMessageTool

class OaicompatDifyModelEndpoint(Endpoint):
    def _invoke(self, r: Request, values: Mapping, settings: Mapping) -> Response:
        """
        Invokes the endpoint with the given request.
        """
        llm: Optional[dict] = settings.get("llm")
        if not llm:
            return Response("LLM is required", status=400)
        data = r.get_json(force=True)
        prompt_messages: list[PromptMessage] = []
        if not isinstance(data.get("messages"), list) or not data.get("messages"):
            raise ValueError("messages must be a non-empty list")
        for message in data.get("messages", []):
            # transform OpenAI-style messages into PromptMessage objects (omitted here)
            pass
        tools: list[PromptMessageTool] = []
        if data.get("tools"):
            for tool in data.get("tools", []):
                tools.append(PromptMessageTool(**tool))
        stream: bool = data.get("stream", False)

        def generator():
            if not stream:
                # Blocking call: return a single OpenAI-style chat.completion object
                llm_invoke_response = self.session.model.llm.invoke(
                    model_config=LLMModelConfig(**llm),
                    prompt_messages=prompt_messages,
                    tools=tools,
                    stream=False,
                )
                yield json.dumps({
                    "id": "chatcmpl-" + str(uuid.uuid4()),
                    "object": "chat.completion",
                    "created": int(time.time()),
                    "model": llm.get("model"),
                    "choices": [{
                        "index": 0,
                        "message": {
                            "role": "assistant",
                            "content": llm_invoke_response.message.content
                        },
                        "finish_reason": "stop"
                    }],
                    "usage": {
                        "prompt_tokens": llm_invoke_response.usage.prompt_tokens,
                        "completion_tokens": llm_invoke_response.usage.completion_tokens,
                        "total_tokens": llm_invoke_response.usage.total_tokens
                    }
                })
            else:
                # Streaming call: yield OpenAI-style chat.completion.chunk objects
                llm_invoke_response = self.session.model.llm.invoke(
                    model_config=LLMModelConfig(**llm),
                    prompt_messages=prompt_messages,
                    tools=tools,
                    stream=True,
                )
                for chunk in llm_invoke_response:
                    yield json.dumps({
                        "id": "chatcmpl-" + str(uuid.uuid4()),
                        "object": "chat.completion.chunk",
                        "created": int(time.time()),
                        "model": llm.get("model"),
                        "choices": [{
                            "index": 0,
                            "delta": {"content": chunk.delta.message.content},
                            "finish_reason": None
                        }]
                    }) + "\n\n"

        return Response(
            generator(),
            status=200,
            content_type="text/event-stream" if stream else "application/json",
        )
Once everything is done, we can use a curl command to test whether the implementation works, as sketched below.
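A minimal sketch of such a test, assuming the llm endpoint is declared at the path /v1/chat/completions and that <endpoint-url> stands for the URL Dify assigns to the plugin's endpoint group (both are assumptions, not confirmed by the article):

curl -X POST '<endpoint-url>/v1/chat/completions' \
  -H 'Content-Type: application/json' \
  -d '{
    "messages": [{"role": "user", "content": "Hello!"}],
    "stream": true
  }'

With "stream": true, the response arrives as a sequence of chat.completion.chunk JSON objects; with it omitted, a single chat.completion object is returned.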