Spaces:
Sleeping
Sleeping
Add GitHub Models integration
Browse files
- Dockerfile +1 -0
- azure-models.txt +25 -0
- start.sh +15 -4
Dockerfile
CHANGED
@@ -11,5 +11,6 @@ RUN sed -i "s|set_cookie(|set_cookie(samesite='none',secure=True,|g" backend/ope
|
|
11 |
RUN pip install "litellm[proxy]==1.47.0" && chown -R 1000:0 /app
|
12 |
USER 1000:0
|
13 |
|
|
|
14 |
COPY ./start.sh /start.sh
|
15 |
CMD [ "bash", "/start.sh" ]
|
|
|
11 |
RUN pip install "litellm[proxy]==1.47.0" && chown -R 1000:0 /app
|
12 |
USER 1000:0
|
13 |
|
14 |
+
COPY ./azure-models.txt /assets/azure-models.txt
|
15 |
COPY ./start.sh /start.sh
|
16 |
CMD [ "bash", "/start.sh" ]
|
azure-models.txt
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
AI21-Jamba-Instruct
|
2 |
+
Cohere-command-r
|
3 |
+
Cohere-command-r-plus
|
4 |
+
Cohere-embed-v3-english
|
5 |
+
Cohere-embed-v3-multilingual
|
6 |
+
Meta-Llama-3-70B-Instruct
|
7 |
+
Meta-Llama-3-8B-Instruct
|
8 |
+
Meta-Llama-3.1-405B-Instruct
|
9 |
+
Meta-Llama-3.1-70B-Instruct
|
10 |
+
Meta-Llama-3.1-8B-Instruct
|
11 |
+
Mistral-large
|
12 |
+
Mistral-large-2407
|
13 |
+
Mistral-Nemo
|
14 |
+
Mistral-small
|
15 |
+
gpt-4o
|
16 |
+
gpt-4o-mini
|
17 |
+
text-embedding-3-large
|
18 |
+
text-embedding-3-small
|
19 |
+
Phi-3-medium-128k-instruct
|
20 |
+
Phi-3-medium-4k-instruct
|
21 |
+
Phi-3-mini-128k-instruct
|
22 |
+
Phi-3-mini-4k-instruct
|
23 |
+
Phi-3-small-128k-instruct
|
24 |
+
Phi-3-small-8k-instruct
|
25 |
+
Phi-3.5-mini-instruct
|
start.sh
CHANGED
@@ -3,21 +3,28 @@ set -euo pipefail
|
|
3 |
|
4 |
pids=()
|
5 |
|
6 |
-
|
7 |
-
|
8 |
key_env="$2"
|
9 |
|
10 |
if [[ -n "${!key_env:-}" ]]; then
|
11 |
>&2 echo "[!] Found key ${key_env} for ${provider}"
|
12 |
cat <<EOF
|
13 |
-
- model_name: "${
|
14 |
litellm_params:
|
15 |
-
model: "${
|
16 |
api_key: "os.environ/${key_env}"
|
17 |
EOF
|
18 |
fi
|
19 |
}
|
20 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
21 |
generate_litellm_config() {
|
22 |
cat <<EOF
|
23 |
model_list:
|
@@ -29,6 +36,10 @@ EOF
|
|
29 |
generate_litellm_provider "perplexity" "PERPLEXITY_API_KEY"
|
30 |
generate_litellm_provider "anthropic" "ANTHROPIC_API_KEY"
|
31 |
|
|
|
|
|
|
|
|
|
32 |
if [[ -n "${LITELLM_MODELS_BASE64:-}" ]]; then
|
33 |
echo "${LITELLM_MODELS_BASE64}" | base64 -d
|
34 |
fi
|
|
|
3 |
|
4 |
pids=()
|
5 |
|
generate_litellm_model() {
  # Emit one litellm `model_list` entry (YAML, on stdout) for a single model,
  # but only when its API key is actually configured; emit nothing otherwise.
  #   $1 - model identifier, e.g. "github/gpt-4o" or "anthropic/*"
  #   $2 - NAME of the environment variable that holds the provider API key
  # Outputs: YAML fragment on stdout; a discovery notice on stderr.
  local model="$1"
  local key_env="$2"

  # ${!key_env} is bash indirect expansion: the value of the variable whose
  # name is stored in key_env. The :- default keeps this safe under `set -u`.
  if [[ -n "${!key_env:-}" ]]; then
    # BUG FIX: this message previously interpolated ${provider}, a global that
    # is unset (or stale from a prior generate_litellm_provider call) when this
    # function is invoked directly — e.g. per-model for GitHub Models — so the
    # log named the wrong provider. Report the model we are registering.
    >&2 echo "[!] Found key ${key_env} for ${model}"
    cat <<EOF
  - model_name: "${model}"
    litellm_params:
      model: "${model}"
      api_key: "os.environ/${key_env}"
EOF
  fi
}
20 |
|
generate_litellm_provider() {
  # Register a wildcard entry ("<provider>/*") covering every model of one
  # provider, delegating the key check and YAML emission to the model helper.
  #   $1 - provider slug, e.g. "openai"
  #   $2 - name of the env var holding that provider's API key
  # NOTE: provider/key_env stay global on purpose — sibling code reads them.
  provider=$1
  key_env=$2
  generate_litellm_model "$provider/*" "$key_env"
}
27 |
+
|
28 |
generate_litellm_config() {
|
29 |
cat <<EOF
|
30 |
model_list:
|
|
|
36 |
generate_litellm_provider "perplexity" "PERPLEXITY_API_KEY"
|
37 |
generate_litellm_provider "anthropic" "ANTHROPIC_API_KEY"
|
38 |
|
39 |
+
while read -r model; do
|
40 |
+
generate_litellm_model "github/${model}" "GITHUB_API_KEY"
|
41 |
+
done </assets/azure-models.txt
|
42 |
+
|
43 |
if [[ -n "${LITELLM_MODELS_BASE64:-}" ]]; then
|
44 |
echo "${LITELLM_MODELS_BASE64}" | base64 -d
|
45 |
fi
|