NithyasriVllB committed on
Commit
2ae38a6
1 Parent(s): 97da34d

Upload 5 files

.gitattributes CHANGED
@@ -1,35 +1,35 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.github/workflows/check_file_size.yml ADDED
@@ -0,0 +1,16 @@
+ name: Check file size
+ on: # or directly `on: [push]` to run the action on every push on any branch
+   pull_request:
+     branches: [main]
+
+   # to run this workflow manually from the Actions tab
+   workflow_dispatch:
+
+ jobs:
+   sync-to-hub:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Check large files
+         uses: ActionsDesk/[email protected]
+         with:
+           filesizelimit: 10485760
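
The workflow above only reports oversized files after a push. A local pre-check can catch them earlier; the following is a minimal sketch, assuming it is run from the repository root and using the same 10485760-byte (10 MiB) limit as `filesizelimit`:

```python
import os

LIMIT = 10 * 1024 * 1024  # 10 MiB, same value as filesizelimit above

# Walk the working tree, skip the .git directory, and report files over the limit
for root, _dirs, files in os.walk("."):
    if ".git" in root.split(os.sep):
        continue
    for name in files:
        path = os.path.join(root, name)
        size = os.path.getsize(path)
        if size > LIMIT:
            print(f"{path}: {size} bytes")
```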
.github/workflows/huggingface.yml ADDED
@@ -0,0 +1,20 @@
+ name: Sync to Hugging Face hub
+ on:
+   push:
+     branches: [main]
+
+   # to run this workflow manually from the Actions tab
+   workflow_dispatch:
+
+ jobs:
+   sync-to-hub:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v3
+         with:
+           fetch-depth: 0
+           lfs: true
+       - name: Push to hub
+         env:
+           HF_TOKEN: ${{ secrets.HF_TOKEN }}
+         run: git push -f https://HarshanaLF:[email protected]/spaces/HarshanaLF/Real-Time-Chat-with-AI main
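
For reference, the same sync can be performed without git through the `huggingface_hub` upload API. This sketch is an alternative not used by this commit; it assumes a write-scoped token is available in the `HF_TOKEN` environment variable:

```python
import os

from huggingface_hub import HfApi

# Upload the working tree to the Space; comparable in effect to the
# force-push in the workflow above (assumes HF_TOKEN is set)
api = HfApi(token=os.environ["HF_TOKEN"])
api.upload_folder(
    folder_path=".",
    repo_id="HarshanaLF/Real-Time-Chat-with-AI",
    repo_type="space",
)
```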
README.md CHANGED
@@ -1,13 +1,78 @@
- ---
- title: Chat Flash Sel Model
- emoji: 📉
- colorFrom: red
- colorTo: indigo
- sdk: gradio
- sdk_version: 5.5.0
- app_file: app.py
- pinned: false
- short_description: we can select model and get speedy results
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: Real Time Chat With AI
+ emoji:
+ colorFrom: blue
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 4.31.5
+ app_file: app.py
+ pinned: false
+ short_description: Chat with AI with ⚡Lightning Speed
+ ---
+
+ # H GO
+
+ Inspired by Google Go, H GO is a concise and efficient chat interface that leverages various language models hosted on Hugging Face. It uses Gradio for the user interface and supports several models to cater to diverse needs.
+
+ ## Features
+
+ - **Multiple Model Support**: Choose from models such as Nous Hermes, StarChat, Mistral, and Phi.
+ - **Real-time Interaction**: Get quick, concise responses from the selected model.
+ - **Customizable**: Easily switch models to suit your specific requirements.
+
+ ## Setup
+
+ ### Prerequisites
+
+ - Python 3.7+
+ - Gradio
+ - Hugging Face Hub
+ - Git LFS (Large File Storage)
+
+ ### Installation
+
+ 1. Clone the repository:
+
+ ```bash
+ # Make sure you have git-lfs installed (https://git-lfs.com)
+ git lfs install
+
+ # When prompted for a password, use an access token with write permissions.
+ # Generate one from your settings: https://huggingface.co/settings/tokens
+ git clone https://huggingface.co/spaces/HarshanaLF/Real-Time-Chat-with-AI
+
+ # If you want to clone without large files - just their pointers
+ GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/spaces/HarshanaLF/Real-Time-Chat-with-AI
+ ```
+
+ 2. Navigate to the project directory:
+
+ ```bash
+ cd Real-Time-Chat-with-AI
+ ```
+
+ 3. Install the required dependencies:
+
+ ```bash
+ pip install gradio huggingface_hub
+ ```
+
+ ## Usage
+
+ Run the application with:
+
+ ```bash
+ python app.py
+ ```
+
+ ## Model Descriptions
+
+ - **Nous Hermes Mixtral 8x7B DPO**: A robust model designed for detailed and nuanced conversation handling.
+ - **StarChat2 15b**: A large-scale model optimized for general chat across a wide range of topics.
+ - **Mistral 7B v0.3**: A smaller, efficient model suitable for fast, responsive chat applications.
+ - **Phi 3 mini**: A compact model focused on instructive and concise responses.
+ - **Mixtral 8x7B**: A versatile model capable of handling varied conversational contexts effectively.
app.py ADDED
@@ -0,0 +1,57 @@
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+
+ # Return the appropriate InferenceClient for the selected model
+ def client_fn(model):
+     model_map = {
+         "Nous Hermes Mixtral 8x7B DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+         "StarChat2 15b": "HuggingFaceH4/starchat2-15b-v0.1",
+         "Mistral 7B v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
+         "Phi 3 mini": "microsoft/Phi-3-mini-4k-instruct",
+         "Mixtral 8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+     }
+     return InferenceClient(model_map.get(model, "mistralai/Mixtral-8x7B-Instruct-v0.1"))
+
+ system_instructions = ("[SYSTEM] You are a chat bot named 'H go'. "
+                        "Your task is to answer the question. "
+                        "Keep the conversation very short, clear, and concise. "
+                        "Respond naturally and concisely to the user's queries. "
+                        "Avoid introductions and answer the query directly; only answer the question asked by the user and do not say unnecessary things. "
+                        "Begin with a greeting if the user initiates the conversation. "
+                        "Here is the user's query: [QUESTION] ")
+
+ # Generate a model response by streaming tokens from the Inference API
+ def models(text, model="Mixtral 8x7B"):
+     client = client_fn(model)
+     generate_kwargs = {
+         "max_new_tokens": 100,
+         "do_sample": True,
+     }
+
+     formatted_prompt = f"{system_instructions} {text} [ANSWER]"
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+
+     output = ""
+     for response in stream:
+         output += response.token.text
+     if output.endswith("</s>"):
+         output = output[:-4]
+     return output
+
+ # Gradio interface description and configuration
+ description = """# H GO
+ ### Inspired by Google Go"""
+
+ with gr.Blocks() as demo:
+     gr.Markdown(description)
+
+     text_input = gr.Textbox(label="Enter your message here:")
+     dropdown = gr.Dropdown(['Mixtral 8x7B', 'Nous Hermes Mixtral 8x7B DPO', 'StarChat2 15b', 'Mistral 7B v0.3', 'Phi 3 mini'], value="Mistral 7B v0.3", label="Select Model")
+     submit_btn = gr.Button("Send")
+     output_text = gr.Textbox(label="Response")
+
+     submit_btn.click(fn=models, inputs=[text_input, dropdown], outputs=output_text)
+
+ # Queue and launch configuration for Gradio
+ demo.queue(max_size=300000)
+ demo.launch()
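
To test a model without launching the Gradio UI, the streaming call made by `models()` can be reproduced standalone. A minimal sketch; the model ID and prompt are illustrative, and gated or rate-limited endpoints may additionally require an HF token:

```python
from huggingface_hub import InferenceClient

# Same call pattern as models() in app.py, minus the Gradio wrapper
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
stream = client.text_generation(
    "[SYSTEM] Answer briefly. [QUESTION] What is Gradio? [ANSWER]",
    max_new_tokens=100,
    do_sample=True,
    stream=True,
    details=True,
    return_full_text=False,
)

output = ""
for response in stream:
    output += response.token.text  # each streamed item carries one token
if output.endswith("</s>"):       # strip the end-of-sequence marker if present
    output = output[:-4]
print(output)
```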