Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,13 +1,18 @@
|
|
1 |
import streamlit as st
|
2 |
from langchain.prompts import PromptTemplate
|
3 |
from langchain.llms import CTransformers
|
|
|
4 |
|
5 |
## Function to get response from LLaMA 2 model
|
6 |
|
7 |
def getLLamaresponse(input_text,no_words,blog_style):
|
8 |
|
|
|
|
|
|
|
|
|
9 |
### LLama2 model
|
10 |
-
llm=CTransformers(model=
|
11 |
model_type='llama',
|
12 |
config={'max_new_tokens':256,
|
13 |
'temperature':0.01})
|
|
|
1 |
import streamlit as st
|
2 |
from langchain.prompts import PromptTemplate
|
3 |
from langchain.llms import CTransformers
|
4 |
+
from transformers import AutoModel
|
5 |
|
6 |
## Function to get response from LLaMA 2 model
|
7 |
|
8 |
def getLLamaresponse(input_text,no_words,blog_style):
|
9 |
|
10 |
+
# Load model directly
|
11 |
+
|
12 |
+
model = AutoModel.from_pretrained("TheBloke/Llama-2-7B-Chat-GGML")
|
13 |
+
|
14 |
### LLama2 model
|
15 |
+
llm=CTransformers(model=model,
|
16 |
model_type='llama',
|
17 |
config={'max_new_tokens':256,
|
18 |
'temperature':0.01})
|