rohankaran committed
Commit 13d0c78
1 Parent(s): ce9e876

Adapt guardrails_models to handle input/output blocking scenarios


guardrails_models.py has been updated to handle scenarios where inputs or outputs are blocked by the language model (LLM). It now catches a separate exception, 'StopCandidateException', for cases where the output is blocked by the LLM, and yields a distinct warning message for each case. Most of the entries in the 'get_all_models' function have been commented out, leaving only the Gemini-Pro and Gemini-Pro + NeMo Guardrails models active.
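For context, the pattern this commit applies to the Gemini streaming paths looks roughly like the sketch below. It is a minimal, hypothetical reconstruction: 'stream_reply' and its arguments are illustrative names, and a LangChain-style '.stream()' interface is assumed; only the two exception names and warning strings come from the diff itself.

    from google.generativeai.types import BlockedPromptException, StopCandidateException

    def stream_reply(chat_model, messages):
        # Hypothetical helper, not a function from the diff.
        try:
            # Stream the reply chunk by chunk so partial output reaches the user early.
            for chunk in chat_model.stream(messages):
                yield chunk.content
        except BlockedPromptException:
            # Gemini's safety filters rejected the *input* prompt.
            yield "⚠️ I'm sorry, I cannot respond to that. (The input was blocked by the LLM)"
        except StopCandidateException:
            # Gemini halted the *output* candidate mid-generation.
            yield "⚠️ I'm sorry, I cannot respond to that. (The output was blocked by the LLM)"

Yielding a warning string instead of re-raising keeps the chat UI's streaming loop intact: the caller consumes the generator the same way whether the model answered or a safety block fired.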

Files changed (1)
  1. guardrails_models.py +46 -42
guardrails_models.py CHANGED
@@ -3,7 +3,7 @@ import random
 from typing import List, Optional
 
 import openai
-from google.generativeai.types import BlockedPromptException
+from google.generativeai.types import BlockedPromptException, StopCandidateException
 from langchain_community.chat_models import ChatAnyscale
 from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
 from langchain_google_genai import ChatGoogleGenerativeAI
@@ -124,7 +124,9 @@ def gemini_pro(
         for message in ai_message:
             yield message.content
     except BlockedPromptException:
-        yield "⚠️ I'm sorry, I cannot respond to that. (The LLM blocked this message)"
+        yield "⚠️ I'm sorry, I cannot respond to that. (The input was blocked by the LLM)"
+    except StopCandidateException:
+        yield "⚠️ I'm sorry, I cannot respond to that. (The output was blocked by the LLM)"
 
 
 ### LLAMA GUARD ###
@@ -264,6 +266,8 @@ def gemini_pro_llamaguard(
         for message in response:
             yield message
     except BlockedPromptException:
+        yield "⚠️ I'm sorry, I cannot respond to that. (The input was blocked by the LLM)"
+    except StopCandidateException:
         yield "⚠️ I'm sorry, I cannot respond to that. (The output was blocked by the LLM)"
 
 
@@ -389,50 +393,50 @@ def gemini_pro_nemoguardrails(
 
 def get_all_models():
     return [
-        {
-            "name": "gpt3.5-turbo-1106",
-            "model": gpt35_turbo,
-        },
-        {
-            "name": "Llama-2-70b-chat-hf",
-            "model": llama70B,
-        },
-        {
-            "name": "Mixtral-8x7B-Instruct-v0.1",
-            "model": mixtral7x8,
-        },
+        # {
+        #     "name": "gpt3.5-turbo-1106",
+        #     "model": gpt35_turbo,
+        # },
+        # {
+        #     "name": "Llama-2-70b-chat-hf",
+        #     "model": llama70B,
+        # },
+        # {
+        #     "name": "Mixtral-8x7B-Instruct-v0.1",
+        #     "model": mixtral7x8,
+        # },
         {
             "name": "Gemini-Pro",
             "model": gemini_pro,
         },
-        {
-            "name": "gpt3.5-turbo-1106 + Llama Guard",
-            "model": gpt35_turbo_llamaguard,
-        },
-        {
-            "name": "Llama-2-70b-chat-hf + Llama Guard",
-            "model": llama70B_llamaguard,
-        },
-        {
-            "name": "Mixtral-8x7B-Instruct-v0.1 + Llama Guard",
-            "model": mixtral7x8_llamaguard,
-        },
-        {
-            "name": "Gemini-Pro + Llama Guard",
-            "model": gemini_pro_llamaguard,
-        },
-        {
-            "name": "gpt3.5-turbo-1106 + NeMo Guardrails",
-            "model": gpt35_turbo_nemoguardrails,
-        },
-        {
-            "name": "Llama-2-70b-chat-hf + NeMo Guardrails",
-            "model": llama70B_nemoguardrails,
-        },
-        {
-            "name": "Mixtral-8x7B-Instruct-v0.1 + NeMo Guardrails",
-            "model": mixtral7x8_nemoguardrails,
-        },
+        # {
+        #     "name": "gpt3.5-turbo-1106 + Llama Guard",
+        #     "model": gpt35_turbo_llamaguard,
+        # },
+        # {
+        #     "name": "Llama-2-70b-chat-hf + Llama Guard",
+        #     "model": llama70B_llamaguard,
+        # },
+        # {
+        #     "name": "Mixtral-8x7B-Instruct-v0.1 + Llama Guard",
+        #     "model": mixtral7x8_llamaguard,
+        # },
+        # {
+        #     "name": "Gemini-Pro + Llama Guard",
+        #     "model": gemini_pro_llamaguard,
+        # },
+        # {
+        #     "name": "gpt3.5-turbo-1106 + NeMo Guardrails",
+        #     "model": gpt35_turbo_nemoguardrails,
+        # },
+        # {
+        #     "name": "Llama-2-70b-chat-hf + NeMo Guardrails",
+        #     "model": llama70B_nemoguardrails,
+        # },
+        # {
+        #     "name": "Mixtral-8x7B-Instruct-v0.1 + NeMo Guardrails",
+        #     "model": mixtral7x8_nemoguardrails,
+        # },
         {
             "name": "Gemini-Pro + NeMo Guardrails",
             "model": gemini_pro_nemoguardrails,