---
# Cache setting: set to true to enable caching.
cache: true

# Definitions of custom endpoints.
endpoints:
  custom:
    # OpenAI (optionally routed through a reverse proxy).
    - name: "openAI"
      apiKey: "${OPENAI_API_KEY}"
      baseURL: "${OPENAI_REVERSE_PROXY}"
      models:
        default: ["gpt-4o", "gpt-4o-mini"]
        fetch: true
      titleConvo: true
      titleModel: "gpt-4o-mini"
      summarize: false
      summaryModel: "gpt-4o-mini"
      modelDisplayLabel: "ChatGPT"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

    # NOTE(review): this endpoint is named "assistants" but is configured
    # with Anthropic credentials and Claude models. "assistants" is a
    # reserved endpoint name (OpenAI Assistants API) in this config
    # dialect — confirm whether this entry should be renamed (e.g.
    # "Anthropic"). Name left unchanged to avoid breaking consumers.
    - name: "assistants"
      apiKey: "${ANTHROPIC_API_KEY}"
      baseURL: "${ANTHROPIC_REVERSE_PROXY}"
      models:
        default: ["claude-3-opus", "claude-3-sonnet", "claude-3-haiku"]
        fetch: true
      titleConvo: true
      modelDisplayLabel: "Claude"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

    # Google Gemini (optionally routed through a reverse proxy).
    - name: "google"
      apiKey: "${GOOGLE_KEY}"
      baseURL: "${GOOGLE_REVERSE_PROXY}"
      models:
        default: ["gemini-pro-1.5", "gemini-pro", "gemini-1.5-flash-latest"]
        fetch: true
      titleConvo: true
      modelDisplayLabel: "Gemini"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

    # HuggingFace Inference API (OpenAI-compatible v1 route).
    - name: "HuggingFace"
      apiKey: "${HUGGINGFACE_TOKEN}"
      baseURL: "https://api-inference.huggingface.co/v1"
      models:
        default:
          - "codellama/CodeLlama-34b-Instruct-hf"
          - "google/gemma-1.1-2b-it"
          - "google/gemma-1.1-7b-it"
          - "HuggingFaceH4/starchat2-15b-v0.1"
          - "HuggingFaceH4/zephyr-7b-beta"
          - "meta-llama/Meta-Llama-3-8B-Instruct"
          - "microsoft/Phi-3-mini-4k-instruct"
          - "mistralai/Mistral-7B-Instruct-v0.1"
          - "mistralai/Mistral-7B-Instruct-v0.2"
          - "mistralai/Mixtral-8x7B-Instruct-v0.1"
          - "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
        fetch: true
      titleConvo: true
      titleModel: "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
      dropParams: ["top_p"]
      modelDisplayLabel: "HuggingFace"

    # Cohere (model list pinned explicitly; remote fetch disabled).
    - name: "cohere"
      apiKey: "${COHERE_API_KEY}"
      baseURL: "https://api.cohere.ai/v1"
      models:
        default:
          - "command-r"
          - "command-r-plus"
          - "command-light"
          - "command-light-nightly"
          - "command"
          - "command-nightly"
        fetch: false
      modelDisplayLabel: "cohere"
      titleModel: "command"
      dropParams:
        - "stop"
        - "user"
        - "frequency_penalty"
        - "presence_penalty"
        - "temperature"
        - "top_p"