Germano Cavalcante
committed on
Commit
•
a8144f6
1
Parent(s):
7b6b8b6
Initial commit
Browse files
Dockerfile
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# syntax=docker/dockerfile:1

# Pin the base image to a specific LTS release for reproducible builds;
# a bare `ubuntu` resolves to :latest and can change underneath you.
FROM ubuntu:22.04

# Install the packages needed to fetch and run Ollama.
# --no-install-recommends keeps the image small; the apt list cleanup is in
# the SAME layer so the package cache never persists into the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        bash \
        ca-certificates \
        curl \
    && rm -rf /var/lib/apt/lists/*

# Install Ollama via the official installer. pipefail makes the RUN fail if
# curl fails (default /bin/sh would mask the upstream error).
# NOTE(review): curl | sh is unpinned and unverified — consider pinning an
# Ollama release and checking a checksum for production use.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
RUN curl -fsSL https://ollama.com/install.sh | sh

# WORKDIR creates /app if missing — no separate `mkdir` needed.
WORKDIR /app

# The runtime (e.g. a HF Space) may run this container under an arbitrary
# non-root UID, and start.sh downloads the model into /app at runtime, so
# the directory must be writable by any user.
RUN chmod 777 /app

# Copy only the files the container needs; set the execute bit at copy time
# (COPY --chmod) instead of a follow-up RUN chmod, which doubles the layer.
COPY Modelfile /app/Modelfile
COPY --chmod=0755 start.sh /app/start.sh

# Document the port the Ollama server listens on (EXPOSE does not publish).
EXPOSE 11434

# Exec-form CMD so the script is PID 1 and receives SIGTERM on `docker stop`.
CMD ["/bin/bash", "/app/start.sh"]
Modelfile
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Ollama Modelfile: builds a chat model from a local GGUF weights file.
# `llama.gguf` is expected to exist in the build context (downloaded by the
# container's start script before `ollama create` runs).
FROM ./llama.gguf

# Default system prompt injected when the client does not supply one.
SYSTEM """You are a helpful AI assistant. Respond to users accurately."""

# Prompt template rendered for each request: optional system turn, optional
# user turn, then an open assistant turn for the model to complete.
# NOTE(review): these are ChatML markers (<|im_start|>/<|im_end|>), but the
# weights appear to be a Llama-3-family GGUF, which typically uses
# <|start_header_id|>/<|eot_id|> — verify against the chat template embedded
# in the GGUF before relying on this.
TEMPLATE """{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
"""

# Stop generation at the ChatML turn markers so the model does not run on
# into a fabricated next turn.
PARAMETER stop <|im_start|>
PARAMETER stop <|im_end|>
README.md
CHANGED
@@ -1,11 +1,13 @@
|
|
1 |
---
|
2 |
title: Ollama
|
3 |
-
emoji:
|
4 |
colorFrom: gray
|
5 |
colorTo: red
|
6 |
sdk: docker
|
7 |
pinned: false
|
8 |
license: apache-2.0
|
|
|
|
|
9 |
---
|
10 |
|
11 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
---
|
2 |
title: Ollama
|
3 |
+
emoji: 🦙
|
4 |
colorFrom: gray
|
5 |
colorTo: red
|
6 |
sdk: docker
|
7 |
pinned: false
|
8 |
license: apache-2.0
|
9 |
+
app_port: 11434
|
10 |
+
base_path: /
|
11 |
---
|
12 |
|
13 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
start.sh
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/bin/bash
# Container entry point: start the Ollama server, fetch the model weights
# (first run only), register the model, then block on the server process so
# the container exits if the server dies.
set -euo pipefail

# Bind all interfaces so the published port is reachable from outside the
# container, and allow cross-origin requests from the Blender projects site.
export OLLAMA_HOST=0.0.0.0
export OLLAMA_ORIGINS=https://projects.blender.org

# Start the Ollama server in the background and remember its PID.
ollama serve &
SERVER_PID=$!

# Poll the API instead of a fixed `sleep 10`: proceed as soon as the server
# answers, and fail fast (~30 s cap) if it never comes up.
for _ in $(seq 1 30); do
    if curl -fsS http://127.0.0.1:11434/ >/dev/null 2>&1; then
        break
    fi
    sleep 1
done

# Download the weights only if missing — avoids re-fetching several GB on
# every restart. URL is quoted because it contains `?`, a shell glob char.
if [ ! -f llama.gguf ]; then
    curl -fsSL -o llama.gguf \
        'https://huggingface.co/lmstudio-community/Llama-3-Groq-8B-Tool-Use-GGUF/resolve/main/Llama-3-Groq-8B-Tool-Use-Q4_K_M.gguf?download=true'
fi

# Register the model with Ollama under the name clients will request.
ollama create llama3.1 -f Modelfile

# Wait on the server instead of `tail -f /dev/null`: the container stays up
# exactly as long as the server is alive, instead of lingering after a crash.
wait "$SERVER_PID"