# pplx2api / app.py
# Flask proxy that exposes an Anthropic-style /ai/v1/messages API backed by Perplexity AI.
import os
import json
import uuid
from datetime import datetime
from flask import Flask, request, Response, jsonify
import socketio
import requests
import logging
from threading import Event
import tiktoken # tiktoken library, used for token counting
from tiktoken import Encoding
def local_encoding_for_model(model_name: str) -> Encoding:
    """
    Build a cl100k_base ``Encoding`` from a locally bundled BPE rank file,
    so token counting never triggers tiktoken's network download.

    Args:
        model_name: Accepted only for signature compatibility with
            ``tiktoken.encoding_for_model``; the returned encoding is
            always cl100k_base regardless of the requested model.

    Returns:
        A usable ``tiktoken.Encoding`` instance.

    Raises:
        FileNotFoundError: If the local encoding file is missing.
    """
    import base64  # local import: only needed when this loader runs

    local_encoding_path = '/app/cl100k_base.tiktoken'
    if not os.path.exists(local_encoding_path):
        raise FileNotFoundError(f"Local encoding file not found at {local_encoding_path}")

    # The .tiktoken file format is one "<base64-token> <rank>" pair per line.
    # BUG FIX: the original returned an Encoding with an empty pat_str and
    # empty mergeable_ranks, which cannot tokenize anything; parse the real
    # rank table from the bundled file instead.
    mergeable_ranks = {}
    with open(local_encoding_path, 'rb') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            token_b64, rank = line.split()
            mergeable_ranks[base64.b64decode(token_b64)] = int(rank)

    # Canonical cl100k_base split pattern and special tokens, taken from
    # tiktoken's own cl100k_base definition.
    pat_str = (
        r"""'(?i:[sdmt]|ll|ve|re)|[^\r\n\p{L}\p{N}]?+\p{L}+|\p{N}{1,3}|"""
        r""" ?[^\s\p{L}\p{N}]++[\r\n]*|\s*[\r\n]|\s+(?!\S)|\s+"""
    )
    special_tokens = {
        "<|endoftext|>": 100257,
        "<|fim_prefix|>": 100258,
        "<|fim_middle|>": 100259,
        "<|fim_suffix|>": 100260,
        "<|endofprompt|>": 100276,
    }
    return Encoding(
        name="cl100k_base",
        pat_str=pat_str,
        mergeable_ranks=mergeable_ranks,
        special_tokens=special_tokens,
    )
# Replace tiktoken's encoding_for_model with the local loader above so no
# network access is needed for token counting.
tiktoken.encoding_for_model = local_encoding_for_model
app = Flask(__name__)
logging.basicConfig(level=logging.INFO)
# Shared-secret API key read from the environment; checked on every request.
API_KEY = os.environ.get('PPLX_KEY')
# Optional outbound proxy URL for all HTTP / WebSocket traffic.
proxy_url = os.environ.get('PROXY_URL')
# When a proxy is configured, route socket.io traffic through a requests
# Session carrying the proxy settings; otherwise use the default transport.
if proxy_url:
    proxies = {
        'http': proxy_url,
        'https': proxy_url
    }
    transport = requests.Session()
    transport.proxies.update(proxies)
else:
    transport = None
# Single module-level socket.io client, shared across requests.
sio = socketio.Client(http_session=transport, logger=True, engineio_logger=True)
# Connection options
connect_opts = {
    'transports': ['websocket', 'polling'],  # allow fallback to long-polling
}
# Extra headers sent on connect; session cookie and UA come from env vars.
sio_opts = {
    'extraHeaders': {
        'Cookie': os.environ.get('PPLX_COOKIE'),
        'User-Agent': os.environ.get('USER_AGENT'),
        'Accept': '*/*',
        'priority': 'u=1, i',
        'Referer': 'https://www.perplexity.ai/',
    }
}
def log_request(ip, route, status):
    """Emit one access-log line: "<ISO timestamp> - <ip> - <route> - <status>"."""
    logging.info("%s - %s - %s - %s", datetime.now().isoformat(), ip, route, status)
def validate_api_key():
    """
    Compare the request's x-api-key header with the configured API_KEY.

    Returns None when the key matches; otherwise logs the rejection and
    returns a (json, 401) pair the caller should return immediately.
    """
    supplied = request.headers.get('x-api-key')
    if supplied == API_KEY:
        return None
    log_request(request.remote_addr, request.path, 401)
    return jsonify({"error": "Invalid API key"}), 401
def normalize_content(content):
    """
    Recursively flatten a message's ``content`` field into a plain string.

    Anthropic-style requests may carry content as a string, a content-block
    dict, or a list of blocks. Text blocks contribute their raw text; any
    other dict is serialized as JSON so nothing is silently dropped.

    Args:
        content: str, dict, list, or any other value.

    Returns:
        A string; unrecognized types collapse to "".
    """
    if isinstance(content, str):
        return content
    if isinstance(content, dict):
        # FIX: Anthropic text blocks ({"type": "text", "text": ...}) used to
        # be JSON-dumped wholesale, leaking {"type": ...} metadata into the
        # forwarded prompt. Use the block's text directly.
        if content.get("type") == "text" and isinstance(content.get("text"), str):
            return content["text"]
        # Any other dict: keep it as JSON (ensure_ascii=False preserves CJK).
        return json.dumps(content, ensure_ascii=False)
    if isinstance(content, list):
        # Flatten each element recursively, joined with single spaces.
        return " ".join(normalize_content(item) for item in content)
    # Numbers, None, etc. carry no text content.
    return ""
def calculate_tokens_via_tiktoken(text, model="gpt-3.5-turbo"):
    """
    Count the tokens in *text* using tiktoken's encoder for *model*.

    Claude's tokenizer behaves similarly enough to GPT's that the same
    encoder gives a usable estimate for the usage fields we report.
    """
    encoder = tiktoken.encoding_for_model(model)
    return len(encoder.encode(text))
@app.route('/')
def root():
    """Index route: log the hit and describe the proxy's single endpoint."""
    log_request(request.remote_addr, request.path, 200)
    messages_doc = {
        "method": "POST",
        "description": "Send a message to the AI",
        "headers": {
            "x-api-key": "Your API key (required)",
            "Content-Type": "application/json",
        },
        "body": {
            "messages": "Array of message objects",
            "stream": "Boolean (true for streaming response)",
            "model": "Model to be used (optional, defaults to claude-3-opus-20240229)",
        },
    }
    return jsonify({
        "message": "Welcome to the Perplexity AI Proxy API",
        "endpoints": {"/ai/v1/messages": messages_doc},
    })
@app.route('/ai/v1/messages', methods=['POST'])
def messages():
    """
    Anthropic-compatible messages endpoint.

    Flattens the incoming conversation, forwards it to Perplexity AI over
    socket.io, and replays the answer either as Anthropic-style SSE events
    (stream=true, the default) or as one complete JSON message (stream=false).

    Returns a streaming ``text/event-stream`` Response, a JSON message, or
    a (json, 400) error pair on malformed input.
    """
    auth_error = validate_api_key()
    if auth_error:
        return auth_error
    try:
        json_body = request.json
        model = json_body.get('model', 'claude-3-opus-20240229')  # echoed back to the client; default claude-3-opus
        stream = json_body.get('stream', True)  # streaming is the default
        # Recursively flatten every message's content into one prompt string.
        previous_messages = "\n\n".join([normalize_content(msg['content']) for msg in json_body['messages']])
        # Estimate input tokens with tiktoken.
        input_tokens = calculate_tokens_via_tiktoken(previous_messages, model="gpt-3.5-turbo")
        msg_id = str(uuid.uuid4())
        response_event = Event()
        response_text = []
        if not stream:
            # Non-streaming path: wait for the full answer, return one JSON body.
            return handle_non_stream(previous_messages, msg_id, model, input_tokens)
        # Log now, while the Flask request context is still active (the
        # generator below runs after the view function has returned).
        log_request(request.remote_addr, request.path, 200)
        def generate():
            # Anthropic SSE preamble: message_start, an empty text block, ping.
            yield create_event("message_start", {
                "type": "message_start",
                "message": {
                    "id": msg_id,
                    "type": "message",
                    "role": "assistant",
                    "content": [],
                    "model": model,
                    "stop_reason": None,
                    "stop_sequence": None,
                    "usage": {"input_tokens": input_tokens, "output_tokens": 1},
                },
            })
            yield create_event("content_block_start", {"type": "content_block_start", "index": 0, "content_block": {"type": "text", "text": ""}})
            yield create_event("ping", {"type": "ping"})
            def on_connect():
                # Once connected, fire the actual question at Perplexity
                # along with the metadata its frontend normally sends.
                logging.info("Connected to Perplexity AI")
                emit_data = {
                    "version": "2.9",
                    "source": "default",
                    "attachments": [],
                    "language": "en-GB",
                    "timezone": "Europe/London",
                    "mode": "concise",
                    "is_related_query": False,
                    "is_default_related_query": False,
                    "visitor_id": str(uuid.uuid4()),
                    "frontend_context_uuid": str(uuid.uuid4()),
                    "prompt_source": "user",
                    "query_source": "home"
                }
                sio.emit('perplexity_ask', (previous_messages, emit_data))
            def on_query_progress(data):
                # Each progress frame carries the full chunk list so far;
                # queue only the newest chunk for emission.
                nonlocal response_text
                if 'text' in data:
                    text = json.loads(data['text'])
                    chunk = text['chunks'][-1] if text['chunks'] else None
                    if chunk:
                        response_text.append(chunk)
                # The final frame releases the wait loop below.
                if data.get('final', False):
                    response_event.set()
            def on_query_complete(data):
                response_event.set()
            def on_disconnect():
                logging.info("Disconnected from Perplexity AI")
                response_event.set()
            def on_connect_error(data):
                logging.error(f"Connection error: {data}")
                response_text.append(f"Error connecting to Perplexity AI: {data}")
                response_event.set()
            sio.on('connect', on_connect)
            sio.on('query_progress', on_query_progress)
            sio.on('query_complete', on_query_complete)
            sio.on('disconnect', on_disconnect)
            sio.on('connect_error', on_connect_error)
            # Everything actually delivered to the client, kept for usage
            # accounting after the queue has been drained.
            emitted_chunks = []
            try:
                sio.connect('wss://www.perplexity.ai/', **connect_opts, headers=sio_opts['extraHeaders'])
                # Wait for the final frame, then drain the queued chunks.
                while not response_event.is_set():
                    sio.sleep(0.1)
                while response_text:
                    chunk = response_text.pop(0)
                    emitted_chunks.append(chunk)
                    yield create_event("content_block_delta", {
                        "type": "content_block_delta",
                        "index": 0,
                        "delta": {"type": "text_delta", "text": chunk},
                    })
            except Exception as e:
                logging.error(f"Error during socket connection: {str(e)}")
                error_text = f"Error during socket connection: {str(e)}"
                emitted_chunks.append(error_text)
                yield create_event("content_block_delta", {
                    "type": "content_block_delta",
                    "index": 0,
                    "delta": {"type": "text_delta", "text": error_text},
                })
            finally:
                if sio.connected:
                    sio.disconnect()
            # BUG FIX: the original counted ''.join(response_text) here, but
            # the drain loop above had already popped every chunk, so
            # output_tokens was always 0. Count the chunks actually emitted.
            output_tokens = calculate_tokens_via_tiktoken(''.join(emitted_chunks), model="gpt-3.5-turbo")
            yield create_event("content_block_stop", {"type": "content_block_stop", "index": 0})
            yield create_event("message_delta", {
                "type": "message_delta",
                "delta": {"stop_reason": "end_turn", "stop_sequence": None},
                "usage": {"input_tokens": input_tokens, "output_tokens": output_tokens},
            })
            yield create_event("message_stop", {"type": "message_stop"})  # always terminate the stream properly
        return Response(generate(), content_type='text/event-stream')
    except Exception as e:
        logging.error(f"Request error: {str(e)}")
        log_request(request.remote_addr, request.path, 400)
        return jsonify({"error": str(e)}), 400
def handle_non_stream(previous_messages, msg_id, model, input_tokens):
    """
    Handle stream=false: run the Perplexity query to completion and
    return the whole answer as one Anthropic-style JSON message.

    Args:
        previous_messages: Flattened prompt string to send to Perplexity.
        msg_id: Pre-generated UUID string used as the message id.
        model: Model name echoed back in the response body.
        input_tokens: Pre-computed input token count for the usage field.

    Returns:
        A JSON Response with the complete message, or (json, 500) on error.
    """
    try:
        response_event = Event()
        response_text = []
        def on_connect():
            # Once connected, send the question plus the metadata the
            # Perplexity frontend normally includes.
            logging.info("Connected to Perplexity AI")
            emit_data = {
                "version": "2.9",
                "source": "default",
                "attachments": [],
                "language": "en-GB",
                "timezone": "Europe/London",
                "mode": "concise",
                "is_related_query": False,
                "is_default_related_query": False,
                "visitor_id": str(uuid.uuid4()),
                "frontend_context_uuid": str(uuid.uuid4()),
                "prompt_source": "user",
                "query_source": "home"
            }
            sio.emit('perplexity_ask', (previous_messages, emit_data))
        def on_query_progress(data):
            # Each progress frame carries the full chunk list so far;
            # collect only the newest chunk.
            nonlocal response_text
            if 'text' in data:
                text = json.loads(data['text'])
                chunk = text['chunks'][-1] if text['chunks'] else None
                if chunk:
                    response_text.append(chunk)
            # The final progress frame releases the wait below.
            if data.get('final', False):
                response_event.set()
        def on_disconnect():
            logging.info("Disconnected from Perplexity AI")
            response_event.set()
        def on_connect_error(data):
            logging.error(f"Connection error: {data}")
            response_text.append(f"Error connecting to Perplexity AI: {data}")
            response_event.set()
        sio.on('connect', on_connect)
        sio.on('query_progress', on_query_progress)
        sio.on('disconnect', on_disconnect)
        sio.on('connect_error', on_connect_error)
        sio.connect('wss://www.perplexity.ai/', **connect_opts, headers=sio_opts['extraHeaders'])
        # Block until the query finishes (or give up after 30 seconds).
        response_event.wait(timeout=30)
        # Count output tokens over the collected chunks with tiktoken.
        output_tokens = calculate_tokens_via_tiktoken(''.join(response_text), model="gpt-3.5-turbo")
        # Assemble the complete Anthropic-style message body.
        full_response = {
            "content": [{"text": ''.join(response_text), "type": "text"}],  # all chunks merged
            "id": msg_id,
            "model": model,
            "role": "assistant",
            "stop_reason": "end_turn",
            "stop_sequence": None,
            "type": "message",
            "usage": {
                "input_tokens": input_tokens,
                "output_tokens": output_tokens,
            },
        }
        return Response(json.dumps(full_response, ensure_ascii=False), content_type='application/json')
    except Exception as e:
        logging.error(f"Error during socket connection: {str(e)}")
        return jsonify({"error": str(e)}), 500
    finally:
        # Always release the shared socket, whether we returned or raised.
        if sio.connected:
            sio.disconnect()
@app.errorhandler(404)
def not_found(error):
    """Flask 404 handler: record the miss, reply with plain text."""
    log_request(request.remote_addr, request.path, 404)
    body, status = "Not Found", 404
    return body, status
@app.errorhandler(500)
def server_error(error):
    """Flask 500 handler: log the exception, record the hit, reply plainly."""
    logging.error("Server error: %s", error)
    log_request(request.remote_addr, request.path, 500)
    return "Something broke!", 500
def create_event(event, data):
    """
    Format one server-sent-event frame.

    Dict payloads are JSON-encoded with ensure_ascii=False so non-ASCII
    text (e.g. Chinese) passes through unescaped; string payloads are
    used verbatim.
    """
    payload = json.dumps(data, ensure_ascii=False) if isinstance(data, dict) else data
    return f"event: {event}\ndata: {payload}\n\n"
if __name__ == '__main__':
    # Port is configurable via the PORT environment variable (default 8081).
    port = int(os.environ.get('PORT', 8081))
    logging.info(f"Perplexity proxy listening on port {port}")
    # Warn early if no API key is configured: every request would get a 401.
    if not API_KEY:
        logging.warning("Warning: PPLX_KEY environment variable is not set. API key validation will fail.")
    app.run(host='0.0.0.0', port=port)