dh-mc committed
Commit: bccc8dd
Parent: b5c7e68

Added support for OpenLLM

Files changed (1):
  app_modules/llm_loader.py  +9 -1
app_modules/llm_loader.py CHANGED
@@ -8,7 +8,7 @@ import torch
 from langchain.callbacks.base import BaseCallbackHandler
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 from langchain.chat_models import ChatOpenAI
-from langchain.llms import GPT4All, HuggingFacePipeline, LlamaCpp
+from langchain.llms import GPT4All, HuggingFacePipeline, LlamaCpp, OpenLLM
 from langchain.schema import LLMResult
 from transformers import (
     AutoConfig,
@@ -156,6 +156,14 @@ class LLMLoader:
                 verbose=True,
                 temperature=0,
             )
+        elif self.llm_model_type == "openllm":
+            server_url = os.environ.get("OPENLLM_SERVER_URL")
+            print(f" server url: {server_url}")
+            self.llm = OpenLLM(
+                server_url=server_url,
+                callbacks=callbacks,
+                verbose=True,
+            )
         elif self.llm_model_type.startswith("gpt4all"):
             MODEL_PATH = ensure_model_is_downloaded(self.llm_model_type)
             self.llm = GPT4All(
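
For context, a minimal sketch of how the new "openllm" branch is expected to be exercised. It assumes an OpenLLM server is already running and reachable at OPENLLM_SERVER_URL; the URL and prompt below are illustrative values, not taken from the repository:

import os
from langchain.llms import OpenLLM

# Illustrative value; point this at a running OpenLLM server.
os.environ.setdefault("OPENLLM_SERVER_URL", "http://localhost:3000")

# Mirrors the new LLMLoader branch: read the server URL from the
# environment and wrap it with LangChain's OpenLLM client.
llm = OpenLLM(
    server_url=os.environ["OPENLLM_SERVER_URL"],
    verbose=True,
)
print(llm("What is OpenLLM?"))

With llm_model_type set to "openllm", LLMLoader builds the same client and assigns it to self.llm, alongside the existing gpt4all and OpenAI branches.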