prompt
stringlengths 70
19.8k
| completion
stringlengths 8
303
| api
stringlengths 23
93
|
---|---|---|
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import camelot
from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import PandasQueryEngine
from llama_index.core.schema import IndexNode
from llama_index.llms.openai import OpenAI
from llama_index.readers.file import PyMuPDFReader
from typing import List
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
file_path = "billionaires_page.pdf"
reader = PyMuPDFReader()
docs = reader.load(file_path)
def get_tables(path: str, pages: List[int]):
table_dfs = []
for page in pages:
table_list = camelot.read_pdf(path, pages=str(page))
table_df = table_list[0].df
table_df = (
table_df.rename(columns=table_df.iloc[0])
.drop(table_df.index[0])
.reset_index(drop=True)
)
table_dfs.append(table_df)
return table_dfs
table_dfs = get_tables(file_path, pages=[3, 25])
table_dfs[0]
table_dfs[1]
llm = OpenAI(model="gpt-4")
df_query_engines = [
PandasQueryEngine(table_df, llm=llm) for table_df in table_dfs
]
response = df_query_engines[0].query(
"What's the net worth of the second richest billionaire in 2023?"
)
print(str(response))
response = df_query_engines[1].query(
"How many billionaires were there in 2009?"
)
print(str(response))
from llama_index.core import Settings
doc_nodes = Settings.node_parser.get_nodes_from_documents(docs)
summaries = [
(
"This node provides information about the world's richest billionaires"
" in 2023"
),
(
"This node provides information on the number of billionaires and"
" their combined net worth from 2000 to 2023."
),
]
df_nodes = [
IndexNode(text=summary, index_id=f"pandas{idx}")
for idx, summary in enumerate(summaries)
]
df_id_query_engine_mapping = {
f"pandas{idx}": df_query_engine
for idx, df_query_engine in enumerate(df_query_engines)
}
vector_index = VectorStoreIndex(doc_nodes + df_nodes)
vector_retriever = vector_index.as_retriever(similarity_top_k=1)
vector_index0 = | VectorStoreIndex(doc_nodes) | llama_index.core.VectorStoreIndex |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system("wget 'https://raw.githubusercontent.com/jerryjliu/llama_index/main/examples/gatsby/gatsby_full.txt' -O 'gatsby_full.txt'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader(
input_files=["./gatsby_full.txt"]
).load_data()
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.chunk_size = 1024
nodes = Settings.node_parser.get_nodes_from_documents(documents)
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
from llama_index.core import SimpleKeywordTableIndex, VectorStoreIndex
keyword_index = SimpleKeywordTableIndex(
nodes,
storage_context=storage_context,
show_progress=True,
)
vector_index = VectorStoreIndex(
nodes,
storage_context=storage_context,
show_progress=True,
)
from llama_index.core import PromptTemplate
QA_PROMPT_TMPL = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the question. If the answer is not in the context, inform "
"the user that you can't answer the question - DO NOT MAKE UP AN ANSWER.\n"
"In addition to returning the answer, also return a relevance score as to "
"how relevant the answer is to the question. "
"Question: {query_str}\n"
"Answer (including relevance score): "
)
QA_PROMPT = PromptTemplate(QA_PROMPT_TMPL)
keyword_query_engine = keyword_index.as_query_engine(
text_qa_template=QA_PROMPT
)
vector_query_engine = vector_index.as_query_engine(text_qa_template=QA_PROMPT)
response = vector_query_engine.query(
"Describe and summarize the interactions between Gatsby and Daisy"
)
print(response)
response = keyword_query_engine.query(
"Describe and summarize the interactions between Gatsby and Daisy"
)
print(response)
from llama_index.core.tools import QueryEngineTool
keyword_tool = QueryEngineTool.from_defaults(
query_engine=keyword_query_engine,
description="Useful for answering questions about this essay",
)
vector_tool = QueryEngineTool.from_defaults(
query_engine=vector_query_engine,
description="Useful for answering questions about this essay",
)
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector
from llama_index.core.selectors import (
PydanticMultiSelector,
PydanticSingleSelector,
)
from llama_index.core.response_synthesizers import TreeSummarize
TREE_SUMMARIZE_PROMPT_TMPL = (
"Context information from multiple sources is below. Each source may or"
" may not have \na relevance score attached to"
" it.\n---------------------\n{context_str}\n---------------------\nGiven"
" the information from multiple sources and their associated relevance"
" scores (if provided) and not prior knowledge, answer the question. If"
" the answer is not in the context, inform the user that you can't answer"
" the question.\nQuestion: {query_str}\nAnswer: "
)
tree_summarize = TreeSummarize(
summary_template=PromptTemplate(TREE_SUMMARIZE_PROMPT_TMPL)
)
query_engine = RouterQueryEngine(
selector= | LLMMultiSelector.from_defaults() | llama_index.core.selectors.LLMMultiSelector.from_defaults |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().system('pip install llama-index>=0.9.31 pinecone-client>=3.0.0')
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from pinecone import Pinecone, ServerlessSpec
os.environ[
"PINECONE_API_KEY"
] = "<Your Pinecone API key, from app.pinecone.io>"
os.environ["OPENAI_API_KEY"] = "sk-..."
api_key = os.environ["PINECONE_API_KEY"]
pc = Pinecone(api_key=api_key)
pc.create_index(
name="quickstart",
dimension=1536,
metric="euclidean",
spec=ServerlessSpec(cloud="aws", region="us-west-2"),
)
pinecone_index = pc.Index("quickstart")
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.pinecone import PineconeVectorStore
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = | SimpleDirectoryReader("./data/paul_graham") | llama_index.core.SimpleDirectoryReader |
get_ipython().run_line_magic('pip', 'install llama-index-llms-bedrock')
get_ipython().system('pip install llama-index')
from llama_index.llms.bedrock import Bedrock
profile_name = "Your aws profile name"
resp = Bedrock(
model="amazon.titan-text-express-v1", profile_name=profile_name
).complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.bedrock import Bedrock
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="Tell me a story"),
]
resp = Bedrock(
model="amazon.titan-text-express-v1", profile_name=profile_name
).chat(messages)
print(resp)
from llama_index.llms.bedrock import Bedrock
llm = | Bedrock(model="amazon.titan-text-express-v1", profile_name=profile_name) | llama_index.llms.bedrock.Bedrock |
get_ipython().system('pip install llama-index')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.core import Settings
nodes = Settings.get_nodes_from_documents(documents)
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
from llama_index.core import SimpleKeywordTableIndex, VectorStoreIndex
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)
from llama_index.core import QueryBundle
from llama_index.core.schema import NodeWithScore
from llama_index.core.retrievers import (
BaseRetriever,
VectorIndexRetriever,
KeywordTableSimpleRetriever,
)
from typing import List
class CustomRetriever(BaseRetriever):
"""Custom retriever that performs both semantic search and hybrid search."""
def __init__(
self,
vector_retriever: VectorIndexRetriever,
keyword_retriever: KeywordTableSimpleRetriever,
mode: str = "AND",
) -> None:
"""Init params."""
self._vector_retriever = vector_retriever
self._keyword_retriever = keyword_retriever
if mode not in ("AND", "OR"):
raise ValueError("Invalid mode.")
self._mode = mode
super().__init__()
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve nodes given query."""
vector_nodes = self._vector_retriever.retrieve(query_bundle)
keyword_nodes = self._keyword_retriever.retrieve(query_bundle)
vector_ids = {n.node.node_id for n in vector_nodes}
keyword_ids = {n.node.node_id for n in keyword_nodes}
combined_dict = {n.node.node_id: n for n in vector_nodes}
combined_dict.update({n.node.node_id: n for n in keyword_nodes})
if self._mode == "AND":
retrieve_ids = vector_ids.intersection(keyword_ids)
else:
retrieve_ids = vector_ids.union(keyword_ids)
retrieve_nodes = [combined_dict[rid] for rid in retrieve_ids]
return retrieve_nodes
from llama_index.core import get_response_synthesizer
from llama_index.core.query_engine import RetrieverQueryEngine
vector_retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=2)
keyword_retriever = KeywordTableSimpleRetriever(index=keyword_index)
custom_retriever = CustomRetriever(vector_retriever, keyword_retriever)
response_synthesizer = | get_response_synthesizer() | llama_index.core.get_response_synthesizer |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import os
import getpass
import openai
openai.api_key = "sk-"
import chromadb
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore
from IPython.display import Markdown, display
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"author": "Harper Lee",
"theme": "Mafia",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
from llama_index.core import StorageContext
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.vector_stores import (
MetadataFilter,
MetadataFilters,
FilterOperator,
)
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia"),
]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is inception about?")
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", value="Mafia"),
MetadataFilter(key="year", value=1972),
]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is inception about?")
from llama_index.core.vector_stores import FilterOperator, FilterCondition
filters = MetadataFilters(
filters=[
| MetadataFilter(key="theme", value="Fiction") | llama_index.core.vector_stores.MetadataFilter |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import camelot
from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import PandasQueryEngine
from llama_index.core.schema import IndexNode
from llama_index.llms.openai import OpenAI
from llama_index.readers.file import PyMuPDFReader
from typing import List
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = | OpenAI(model="gpt-3.5-turbo") | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "./llama2.pdf"')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/1706.03762.pdf" -O "./attention.pdf"')
from llama_index.core import download_loader
from llama_index.readers.file import PyMuPDFReader
llama2_docs = PyMuPDFReader().load_data(
file_path="./llama2.pdf", metadata=True
)
attention_docs = PyMuPDFReader().load_data(
file_path="./attention.pdf", metadata=True
)
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.core.node_parser import TokenTextSplitter
nodes = TokenTextSplitter(
chunk_size=1024, chunk_overlap=128
).get_nodes_from_documents(llama2_docs + attention_docs)
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore
docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
client = QdrantClient(path="./qdrant_data")
vector_store = QdrantVectorStore("composable", client=client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes=nodes)
vector_retriever = index.as_retriever(similarity_top_k=2)
bm25_retriever = BM25Retriever.from_defaults(
docstore=docstore, similarity_top_k=2
)
from llama_index.core.schema import IndexNode
vector_obj = IndexNode(
index_id="vector", obj=vector_retriever, text="Vector Retriever"
)
bm25_obj = IndexNode(
index_id="bm25", obj=bm25_retriever, text="BM25 Retriever"
)
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(objects=[vector_obj, bm25_obj])
query_engine = summary_index.as_query_engine(
response_mode="tree_summarize", verbose=True
)
response = await query_engine.aquery(
"How does attention work in transformers?"
)
print(str(response))
response = await query_engine.aquery(
"What is the architecture of Llama2 based on?"
)
print(str(response))
response = await query_engine.aquery(
"What was used before attention in transformers?"
)
print(str(response))
docstore.persist("./docstore.json")
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
docstore = | SimpleDocumentStore.from_persist_path("./docstore.json") | llama_index.core.storage.docstore.SimpleDocumentStore.from_persist_path |
from llama_index.agent import OpenAIAgent
import openai
openai.api_key = "sk-api-key"
from llama_index.tools.gmail.base import GmailToolSpec
from llama_index.tools.google_calendar.base import GoogleCalendarToolSpec
from llama_index.tools.google_search.base import GoogleSearchToolSpec
gmail_tools = GmailToolSpec().to_tool_list()
gcal_tools = GoogleCalendarToolSpec().to_tool_list()
gsearch_tools = GoogleSearchToolSpec(key="api-key", engine="engine").to_tool_list()
for tool in [*gmail_tools, *gcal_tools, *gsearch_tools]:
print(tool.metadata.name)
print(tool.metadata.description)
from llama_index.tools.tool_spec.load_and_search.base import LoadAndSearchToolSpec
print("Wrapping " + gsearch_tools[0].metadata.name)
gsearch_load_and_search_tools = LoadAndSearchToolSpec.from_defaults(
gsearch_tools[0],
).to_tool_list()
print("Wrapping gmail " + gmail_tools[0].metadata.name)
gmail_load_and_search_tools = LoadAndSearchToolSpec.from_defaults(
gmail_tools[0],
).to_tool_list()
print("Wrapping google calendar " + gcal_tools[0].metadata.name)
gcal_load_and_search_tools = LoadAndSearchToolSpec.from_defaults(
gcal_tools[0],
).to_tool_list()
all_tools = [
*gsearch_load_and_search_tools,
*gmail_load_and_search_tools,
*gcal_load_and_search_tools,
*gcal_tools[1::],
*gmail_tools[1::],
*gsearch_tools[1::],
]
agent = | OpenAIAgent.from_tools(all_tools, verbose=True) | llama_index.agent.OpenAIAgent.from_tools |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import json
from typing import Sequence, List
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
from llama_index.agent.openai import OpenAIAgent
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = | FunctionTool.from_defaults(fn=add) | llama_index.core.tools.FunctionTool.from_defaults |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import BaseTool, FunctionTool
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
llm = | OpenAI(model="gpt-3.5-turbo-1106") | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-agents-llm-compiler-step')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
import nest_asyncio
nest_asyncio.apply()
from llama_index.packs.agents.llm_compiler.step import LLMCompilerAgentWorker
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"LLMCompilerAgentPack",
"./agent_pack",
skip_load=True,
)
from agent_pack.step import LLMCompilerAgentWorker
import json
from typing import Sequence, List
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
import nest_asyncio
nest_asyncio.apply()
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
tools = [multiply_tool, add_tool]
multiply_tool.metadata.fn_schema_str
from llama_index.core.agent import AgentRunner
llm = OpenAI(model="gpt-4")
callback_manager = llm.callback_manager
agent_worker = LLMCompilerAgentWorker.from_tools(
tools, llm=llm, verbose=True, callback_manager=callback_manager
)
agent = AgentRunner(agent_worker, callback_manager=callback_manager)
response = agent.chat("What is (121 * 3) + 42?")
response
agent.memory.get_all()
get_ipython().system('pip install llama-index-readers-wikipedia')
from llama_index.readers.wikipedia import WikipediaReader
wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Miami"]
city_docs = {}
reader = | WikipediaReader() | llama_index.readers.wikipedia.WikipediaReader |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core.postprocessor import (
PIINodePostprocessor,
NERPIINodePostprocessor,
)
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.core import Document, VectorStoreIndex
from llama_index.core.schema import TextNode
text = """
Hello Paulo Santos. The latest statement for your credit card account \
1111-0000-1111-0000 was mailed to 123 Any Street, Seattle, WA 98109.
"""
node = TextNode(text=text)
processor = NERPIINodePostprocessor()
from llama_index.core.schema import NodeWithScore
new_nodes = processor.postprocess_nodes([NodeWithScore(node=node)])
new_nodes[0].node.get_text()
new_nodes[0].node.metadata["__pii_node_info__"]
from llama_index.llms.openai import OpenAI
processor = PIINodePostprocessor(llm=OpenAI())
from llama_index.core.schema import NodeWithScore
new_nodes = processor.postprocess_nodes([ | NodeWithScore(node=node) | llama_index.core.schema.NodeWithScore |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install "unstructured[msg]"')
import logging
import sys, json
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import os
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
from pydantic import BaseModel, Field
from typing import List
class Instrument(BaseModel):
"""Datamodel for ticker trading details."""
direction: str = Field(description="ticker trading - Buy, Sell, Hold etc")
ticker: str = Field(
description="Stock Ticker. 1-4 character code. Example: AAPL, TSLS, MSFT, VZ"
)
company_name: str = Field(
description="Company name corresponding to ticker"
)
shares_traded: float = Field(description="Number of shares traded")
percent_of_etf: float = Field(description="Percentage of ETF")
class Etf(BaseModel):
"""ETF trading data model"""
etf_ticker: str = Field(
description="ETF Ticker code. Example: ARKK, FSPTX"
)
trade_date: str = Field(description="Date of trading")
stocks: List[Instrument] = Field(
description="List of instruments or shares traded under this etf"
)
class EmailData(BaseModel):
"""Data model for email extracted information."""
etfs: List[Etf] = Field(
description="List of ETFs described in email having list of shares traded under it"
)
trade_notification_date: str = Field(
description="Date of trade notification"
)
sender_email_id: str = Field(description="Email Id of the email sender.")
email_date_time: str = Field(description="Date and time of email")
from llama_index.core import download_loader
from llama_index.readers.file import UnstructuredReader
loader = | UnstructuredReader() | llama_index.readers.file.UnstructuredReader |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import openai
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")
data = SimpleDirectoryReader(input_dir="./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(data)
from llama_index.core.memory import ChatMemoryBuffer
memory = | ChatMemoryBuffer.from_defaults(token_limit=3900) | llama_index.core.memory.ChatMemoryBuffer.from_defaults |
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
from llama_index.core.agent.react.types import (
ActionReasoningStep,
ObservationReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.agent import Task, AgentChatResponse
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
CustomAgentComponent,
QueryComponent,
ToolRunnerComponent,
)
from llama_index.core.llms import MessageRole
from typing import Dict, Any, Optional, Tuple, List, cast
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]:
"""Agent input function.
Returns:
A Dictionary of output keys and values. If you are specifying
src_key when defining links between this component and other
components, make sure the src_key matches the specified output_key.
"""
if "current_reasoning" not in state:
state["current_reasoning"] = []
reasoning_step = ObservationReasoningStep(observation=task.input)
state["current_reasoning"].append(reasoning_step)
return {"input": task.input}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.query_pipeline import InputComponent, Link
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
def react_prompt_fn(
task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool]
) -> List[ChatMessage]:
chat_formatter = ReActChatFormatter()
return chat_formatter.format(
tools,
chat_history=task.memory.get() + state["memory"].get_all(),
current_reasoning=state["current_reasoning"],
)
react_prompt_component = AgentFnComponent(
fn=react_prompt_fn, partial_dict={"tools": [sql_tool]}
)
from typing import Set, Optional
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.llms import ChatResponse
from llama_index.core.agent.types import Task
def parse_react_output_fn(
task: Task, state: Dict[str, Any], chat_response: ChatResponse
):
"""Parse ReAct output into a reasoning step."""
output_parser = ReActOutputParser()
reasoning_step = output_parser.parse(chat_response.message.content)
return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step}
parse_react_output = AgentFnComponent(fn=parse_react_output_fn)
def run_tool_fn(
task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep
):
"""Run tool and process tool output."""
tool_runner_component = ToolRunnerComponent(
[sql_tool], callback_manager=task.callback_manager
)
tool_output = tool_runner_component.run_component(
tool_name=reasoning_step.action,
tool_input=reasoning_step.action_input,
)
observation_step = ObservationReasoningStep(observation=str(tool_output))
state["current_reasoning"].append(observation_step)
return {"response_str": observation_step.get_content(), "is_done": False}
run_tool = AgentFnComponent(fn=run_tool_fn)
def process_response_fn(
task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep
):
"""Process response."""
state["current_reasoning"].append(response_step)
response_str = response_step.response
state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER))
state["memory"].put(
ChatMessage(content=response_str, role=MessageRole.ASSISTANT)
)
return {"response_str": response_str, "is_done": True}
process_response = AgentFnComponent(fn=process_response_fn)
def process_agent_response_fn(
task: Task, state: Dict[str, Any], response_dict: dict
):
"""Process agent response."""
return (
AgentChatResponse(response_dict["response_str"]),
response_dict["is_done"],
)
process_agent_response = AgentFnComponent(fn=process_agent_response_fn)
from llama_index.core.query_pipeline import QueryPipeline as QP
from llama_index.llms.openai import OpenAI
qp.add_modules(
{
"agent_input": agent_input_component,
"react_prompt": react_prompt_component,
"llm": OpenAI(model="gpt-4-1106-preview"),
"react_output_parser": parse_react_output,
"run_tool": run_tool,
"process_response": process_response,
"process_agent_response": process_agent_response,
}
)
qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"])
qp.add_link(
"react_output_parser",
"run_tool",
condition_fn=lambda x: not x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link(
"react_output_parser",
"process_response",
condition_fn=lambda x: x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link("process_response", "process_agent_response")
qp.add_link("run_tool", "process_agent_response")
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.clean_dag)
net.show("agent_dag.html")
from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner
from llama_index.core.callbacks import CallbackManager
agent_worker = | QueryPipelineAgentWorker(qp) | llama_index.core.agent.QueryPipelineAgentWorker |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-supabase')
get_ipython().system('pip install llama-index')
import logging
import sys
from llama_index.core import SimpleDirectoryReader, Document, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.supabase import SupabaseVectorStore
import textwrap
import os
os.environ["OPENAI_API_KEY"] = "[your_openai_api_key]"
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(
"Document ID:",
documents[0].doc_id,
"Document Hash:",
documents[0].doc_hash,
)
vector_store = SupabaseVectorStore(
postgres_connection_string=(
"postgresql://<user>:<password>@<host>:<port>/<db_name>"
),
collection_name="base_demo",
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("Who is the author?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What did the author do growing up?")
print(textwrap.fill(str(response), 100))
from llama_index.core.schema import TextNode
nodes = [
TextNode(
**{
"text": "The Shawshank Redemption",
"metadata": {
"author": "Stephen King",
"theme": "Friendship",
},
}
),
TextNode(
**{
"text": "The Godfather",
"metadata": {
"director": "Francis Ford Coppola",
"theme": "Mafia",
},
}
),
TextNode(
**{
"text": "Inception",
"metadata": {
"director": "Christopher Nolan",
},
}
),
]
vector_store = SupabaseVectorStore(
postgres_connection_string=(
"postgresql://<user>:<password>@<host>:<port>/<db_name>"
),
collection_name="metadata_filters_demo",
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = | VectorStoreIndex(nodes, storage_context=storage_context) | llama_index.core.VectorStoreIndex |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-together')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
domain = "docs.llamaindex.ai"
docs_url = "https://docs.llamaindex.ai/en/latest/"
get_ipython().system('wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent {docs_url}')
from llama_index.readers.file import UnstructuredReader
from pathlib import Path
from llama_index.llms.openai import OpenAI
from llama_index.core import Document
reader = UnstructuredReader()
all_html_files = [
"docs.llamaindex.ai/en/latest/index.html",
"docs.llamaindex.ai/en/latest/contributing/contributing.html",
"docs.llamaindex.ai/en/latest/understanding/understanding.html",
"docs.llamaindex.ai/en/latest/understanding/using_llms/using_llms.html",
"docs.llamaindex.ai/en/latest/understanding/using_llms/privacy.html",
"docs.llamaindex.ai/en/latest/understanding/loading/llamahub.html",
"docs.llamaindex.ai/en/latest/optimizing/production_rag.html",
"docs.llamaindex.ai/en/latest/module_guides/models/llms.html",
]
doc_limit = 10
docs = []
for idx, f in enumerate(all_html_files):
if idx > doc_limit:
break
print(f"Idx {idx}/{len(all_html_files)}")
loaded_docs = reader.load_data(file=f, split_documents=True)
start_idx = 64
loaded_doc = Document(
id_=str(f),
text="\n\n".join([d.get_content() for d in loaded_docs[start_idx:]]),
metadata={"path": str(f)},
)
print(str(f))
docs.append(loaded_doc)
from llama_index.embeddings.together import TogetherEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
api_key = "<api_key>"
embed_model = TogetherEmbedding(
model_name="togethercomputer/m2-bert-80M-32k-retrieval", api_key=api_key
)
llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
from llama_index.core.storage.docstore import SimpleDocumentStore
for doc in docs:
embedding = embed_model.get_text_embedding(doc.get_content())
doc.embedding = embedding
docstore = SimpleDocumentStore()
docstore.add_documents(docs)
from llama_index.core.schema import IndexNode
from llama_index.core import (
load_index_from_storage,
StorageContext,
VectorStoreIndex,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import SummaryIndex
from llama_index.core.retrievers import RecursiveRetriever
import os
from tqdm.notebook import tqdm
import pickle
def build_index(docs, out_path: str = "storage/chunk_index"):
nodes = []
splitter = SentenceSplitter(chunk_size=512, chunk_overlap=70)
for idx, doc in enumerate(tqdm(docs)):
cur_nodes = splitter.get_nodes_from_documents([doc])
for cur_node in cur_nodes:
file_path = doc.metadata["path"]
new_node = IndexNode(
text=cur_node.text or "None",
index_id=str(file_path),
metadata=doc.metadata
)
nodes.append(new_node)
print("num nodes: " + str(len(nodes)))
if not os.path.exists(out_path):
index = VectorStoreIndex(nodes, embed_model=embed_model)
index.set_index_id("simple_index")
index.storage_context.persist(f"./{out_path}")
else:
storage_context = StorageContext.from_defaults(
persist_dir=f"./{out_path}"
)
index = load_index_from_storage(
storage_context, index_id="simple_index", embed_model=embed_model
)
return index
index = build_index(docs)
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.indices.query.embedding_utils import get_top_k_embeddings
from llama_index.core import QueryBundle
from llama_index.core.schema import NodeWithScore
from typing import List, Any, Optional
class HybridRetriever(BaseRetriever):
"""Hybrid retriever."""
def __init__(
self,
vector_index,
docstore,
similarity_top_k: int = 2,
out_top_k: Optional[int] = None,
alpha: float = 0.5,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(**kwargs)
self._vector_index = vector_index
self._embed_model = vector_index._embed_model
self._retriever = vector_index.as_retriever(
similarity_top_k=similarity_top_k
)
self._out_top_k = out_top_k or similarity_top_k
self._docstore = docstore
self._alpha = alpha
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve nodes given query."""
nodes = self._retriever.retrieve(query_bundle.query_str)
docs = [self._docstore.get_document(n.node.index_id) for n in nodes]
doc_embeddings = [d.embedding for d in docs]
query_embedding = self._embed_model.get_query_embedding(
query_bundle.query_str
)
doc_similarities, doc_idxs = get_top_k_embeddings(
query_embedding, doc_embeddings
)
result_tups = []
for doc_idx, doc_similarity in zip(doc_idxs, doc_similarities):
node = nodes[doc_idx]
full_similarity = (self._alpha * node.score) + (
(1 - self._alpha) * doc_similarity
)
print(
f"Doc {doc_idx} (node score, doc similarity, full similarity): {(node.score, doc_similarity, full_similarity)}"
)
result_tups.append((full_similarity, node))
result_tups = sorted(result_tups, key=lambda x: x[0], reverse=True)
for full_score, node in result_tups:
node.score = full_score
return [n for _, n in result_tups][:out_top_k]
top_k = 10
out_top_k = 3
hybrid_retriever = HybridRetriever(
index, docstore, similarity_top_k=top_k, out_top_k=3, alpha=0.5
)
base_retriever = index.as_retriever(similarity_top_k=out_top_k)
def show_nodes(nodes, out_len: int = 200):
for idx, n in enumerate(nodes):
print(f"\n\n >>>>>>>>>>>> ID {n.id_}: {n.metadata['path']}")
print(n.get_content()[:out_len])
query_str = "Tell me more about the LLM interface and where they're used"
nodes = hybrid_retriever.retrieve(query_str)
show_nodes(nodes)
base_nodes = base_retriever.retrieve(query_str)
show_nodes(base_nodes)
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = | RetrieverQueryEngine(hybrid_retriever) | llama_index.core.query_engine.RetrieverQueryEngine |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY')
get_ipython().system('pip install llama-index pypdf')
get_ipython().system("mkdir -p 'data/'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)
retrievals = base_retriever.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for n in retrievals:
display_source_node(n, source_length=1500)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)
response = query_engine_base.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
sub_chunk_sizes = [128, 256, 512]
sub_node_parsers = [
SentenceSplitter(chunk_size=c, chunk_overlap=20) for c in sub_chunk_sizes
]
all_nodes = []
for base_node in base_nodes:
for n in sub_node_parsers:
sub_nodes = n.get_nodes_from_documents([base_node])
sub_inodes = [
| IndexNode.from_text_node(sn, base_node.node_id) | llama_index.core.schema.IndexNode.from_text_node |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
SimpleKeywordTableIndex,
)
from llama_index.core import SummaryIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
llm = OpenAI(model="gpt-4")
splitter = SentenceSplitter(chunk_size=1024)
nodes = splitter.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)
list_retriever = summary_index.as_retriever()
vector_retriever = vector_index.as_retriever()
keyword_retriever = keyword_index.as_retriever()
from llama_index.core.tools import RetrieverTool
list_tool = RetrieverTool.from_defaults(
retriever=list_retriever,
description=(
"Will retrieve all context from Paul Graham's essay on What I Worked"
" On. Don't use if the question only requires more specific context."
),
)
vector_tool = RetrieverTool.from_defaults(
retriever=vector_retriever,
description=(
"Useful for retrieving specific context from Paul Graham essay on What"
" I Worked On."
),
)
keyword_tool = RetrieverTool.from_defaults(
retriever=keyword_retriever,
description=(
"Useful for retrieving specific context from Paul Graham essay on What"
" I Worked On (using entities mentioned in query)"
),
)
from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector
from llama_index.core.selectors import (
PydanticMultiSelector,
PydanticSingleSelector,
)
from llama_index.core.retrievers import RouterRetriever
from llama_index.core.response.notebook_utils import display_source_node
retriever = RouterRetriever(
selector=PydanticSingleSelector.from_defaults(llm=llm),
retriever_tools=[
list_tool,
vector_tool,
],
)
nodes = retriever.retrieve(
"Can you give me all the context regarding the author's life?"
)
for node in nodes:
display_source_node(node)
nodes = retriever.retrieve("What did Paul Graham do after RISD?")
for node in nodes:
display_source_node(node)
retriever = RouterRetriever(
selector= | PydanticMultiSelector.from_defaults(llm=llm) | llama_index.core.selectors.PydanticMultiSelector.from_defaults |
from llama_index.core import VectorStoreIndex
from llama_index.core.objects import ObjectIndex, SimpleObjectNodeMapping
obj1 = {"input": "Hey, how's it going"}
obj2 = ["a", "b", "c", "d"]
obj3 = "llamaindex is an awesome library!"
arbitrary_objects = [obj1, obj2, obj3]
obj_node_mapping = SimpleObjectNodeMapping.from_objects(arbitrary_objects)
nodes = obj_node_mapping.to_nodes(arbitrary_objects)
object_index = ObjectIndex(
index=VectorStoreIndex(nodes=nodes), object_node_mapping=obj_node_mapping
)
object_retriever = object_index.as_retriever(similarity_top_k=1)
object_retriever.retrieve("llamaindex")
object_index.persist()
reloaded_object_index = ObjectIndex.from_persist_dir()
reloaded_object_index._object_node_mapping.obj_node_mapping
object_index._object_node_mapping.obj_node_mapping
from llama_index.core.tools import FunctionTool
from llama_index.core import SummaryIndex
from llama_index.core.objects import SimpleToolNodeMapping
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
add_tool = FunctionTool.from_defaults(fn=add)
object_mapping = | SimpleToolNodeMapping.from_objects([add_tool, multiply_tool]) | llama_index.core.objects.SimpleToolNodeMapping.from_objects |
get_ipython().run_line_magic('pip', 'install llama-index-llms-ai21')
get_ipython().system('pip install llama-index')
from llama_index.llms.ai21 import AI21
api_key = "Your api key"
resp = AI21(api_key=api_key).complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.ai21 import AI21
messages = [
ChatMessage(role="user", content="hello there"),
ChatMessage(
role="assistant", content="Arrrr, matey! How can I help ye today?"
),
ChatMessage(role="user", content="What is your name"),
]
resp = AI21(api_key=api_key).chat(
messages, preamble_override="You are a pirate with a colorful personality"
)
print(resp)
from llama_index.llms.ai21 import AI21
llm = AI21(model="j2-mid", api_key=api_key)
resp = llm.complete("Paul Graham is ")
print(resp)
from llama_index.llms.ai21 import AI21
llm_good = AI21(api_key=api_key)
llm_bad = | AI21(model="j2-mid", api_key="BAD_KEY") | llama_index.llms.ai21.AI21 |
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface')
import nest_asyncio
nest_asyncio.apply()
import os
HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
get_ipython().system('pip install wikipedia -q')
from llama_index.readers.wikipedia import WikipediaReader
cities = [
"San Francisco",
"Toronto",
"New York",
"Vancouver",
"Montreal",
"Tokyo",
"Singapore",
"Paris",
]
documents = WikipediaReader().load_data(
pages=[f"History of {x}" for x in cities]
)
QUESTION_GEN_PROMPT = (
"You are a Teacher/ Professor. Your task is to setup "
"a quiz/examination. Using the provided context, formulate "
"a single question that captures an important fact from the "
"context. Restrict the question to the context information provided."
)
from llama_index.core.evaluation import DatasetGenerator
from llama_index.llms.openai import OpenAI
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
dataset_generator = DatasetGenerator.from_documents(
documents,
question_gen_query=QUESTION_GEN_PROMPT,
llm=gpt_35_llm,
num_questions_per_chunk=25,
)
qrd = dataset_generator.generate_dataset_from_nodes(num=350)
from llama_index.core import VectorStoreIndex
from llama_index.core.retrievers import VectorIndexRetriever
the_index = VectorStoreIndex.from_documents(documents=documents)
the_retriever = VectorIndexRetriever(
index=the_index,
similarity_top_k=2,
)
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
llm = HuggingFaceInferenceAPI(
model_name="meta-llama/Llama-2-7b-chat-hf",
context_window=2048, # to use refine
token=HUGGING_FACE_TOKEN,
)
query_engine = RetrieverQueryEngine.from_args(retriever=the_retriever, llm=llm)
import tqdm
train_dataset = []
num_train_questions = int(0.65 * len(qrd.qr_pairs))
for q, a in tqdm.tqdm(qrd.qr_pairs[:num_train_questions]):
data_entry = {"question": q, "reference": a}
response = query_engine.query(q)
response_struct = {}
response_struct["model"] = "llama-2"
response_struct["text"] = str(response)
response_struct["context"] = (
response.source_nodes[0].node.text[:1000] + "..."
)
data_entry["response_data"] = response_struct
train_dataset.append(data_entry)
from llama_index.llms.openai import OpenAI
from llama_index.finetuning.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from llama_index.core.evaluation import CorrectnessEvaluator
finetuning_handler = OpenAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
gpt_4_llm = OpenAI(
temperature=0, model="gpt-4", callback_manager=callback_manager
)
gpt4_judge = | CorrectnessEvaluator(llm=gpt_4_llm) | llama_index.core.evaluation.CorrectnessEvaluator |
from llama_index.agent import OpenAIAgent
import openai
openai.api_key = "sk-api-key"
from llama_index.tools.gmail.base import GmailToolSpec
from llama_index.tools.google_calendar.base import GoogleCalendarToolSpec
from llama_index.tools.google_search.base import GoogleSearchToolSpec
gmail_tools = | GmailToolSpec() | llama_index.tools.gmail.base.GmailToolSpec |
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install llama-index-packs-arize-phoenix-query-engine')
import os
from llama_index.packs.arize_phoenix_query_engine import ArizePhoenixQueryEnginePack
from llama_index.core.node_parser import SentenceSplitter
from llama_index.readers.web import SimpleWebPageReader
from tqdm.auto import tqdm
os.environ["OPENAI_API_KEY"] = "copy-your-openai-api-key-here"
documents = | SimpleWebPageReader() | llama_index.readers.web.SimpleWebPageReader |
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.text_to_image.base import TextToImageToolSpec
text_to_image_spec = TextToImageToolSpec()
tools = text_to_image_spec.to_tool_list()
agent = | OpenAIAgent.from_tools(tools, verbose=True) | llama_index.agent.OpenAIAgent.from_tools |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import logging
import sys
import pandas as pd
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core.evaluation import DatasetGenerator, RelevancyEvaluator
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Response
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
data_generator = DatasetGenerator.from_documents(documents)
eval_questions = data_generator.generate_questions_from_nodes()
eval_questions
gpt4 = OpenAI(temperature=0, model="gpt-4")
evaluator_gpt4 = | RelevancyEvaluator(llm=gpt4) | llama_index.core.evaluation.RelevancyEvaluator |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-kvstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
from llama_index.storage.kvstore.firestore import FirestoreKVStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.index_store.firestore import FirestoreIndexStore
kvstore = FirestoreKVStore()
storage_context = StorageContext.from_defaults(
docstore=FirestoreDocumentStore(kvstore),
index_store=FirestoreIndexStore(kvstore),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
nodes, storage_context=storage_context
)
len(storage_context.docstore.docs)
storage_context.persist()
list_id = summary_index.index_id
vector_id = vector_index.index_id
keyword_id = keyword_table_index.index_id
from llama_index.core import load_index_from_storage
kvstore = FirestoreKVStore()
storage_context = StorageContext.from_defaults(
docstore= | FirestoreDocumentStore(kvstore) | llama_index.storage.docstore.firestore.FirestoreDocumentStore |
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix
from typing import Iterable
from random import randrange
LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = f"llama-index-colab"
SESSION_CORPUS_ID_PREFIX = (
f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}"
)
def corpus_id(num_id: int) -> str:
return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}"
SESSION_CORPUS_ID = corpus_id(1)
def list_corpora() -> Iterable[genaix.Corpus]:
client = genaix.build_semantic_retriever()
yield from | genaix.list_corpora(client=client) | llama_index.core.vector_stores.google.generativeai.genai_extension.list_corpora |
import openai
openai.api_key = "sk-xxx"
from llama_index.agent.openai import OpenAIAgent
from llama_index.tools.brave_search.base import BraveSearchToolSpec
brave_tool = | BraveSearchToolSpec(api_key="your-api-key") | llama_index.tools.brave_search.base.BraveSearchToolSpec |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
from llama_index.core.llama_dataset import (
LabelledRagDataExample,
CreatedByType,
CreatedBy,
)
query = "This is a test query, is it not?"
query_by = CreatedBy(type=CreatedByType.AI, model_name="gpt-4")
reference_answer = "Yes it is."
reference_answer_by = CreatedBy(type=CreatedByType.HUMAN)
reference_contexts = ["This is a sample context"]
rag_example = LabelledRagDataExample(
query=query,
query_by=query_by,
reference_contexts=reference_contexts,
reference_answer=reference_answer,
reference_answer_by=reference_answer_by,
)
print(rag_example.json())
LabelledRagDataExample.parse_raw(rag_example.json())
rag_example.dict()
LabelledRagDataExample.parse_obj(rag_example.dict())
query = "This is a test query, is it so?"
reference_answer = "I think yes, it is."
reference_contexts = ["This is a second sample context"]
rag_example_2 = LabelledRagDataExample(
query=query,
query_by=query_by,
reference_contexts=reference_contexts,
reference_answer=reference_answer,
reference_answer_by=reference_answer_by,
)
from llama_index.core.llama_dataset import LabelledRagDataset
rag_dataset = | LabelledRagDataset(examples=[rag_example, rag_example_2]) | llama_index.core.llama_dataset.LabelledRagDataset |
get_ipython().system('pip install -U llama-index-multi-modal-llms-dashscope')
get_ipython().run_line_magic('env', 'DASHSCOPE_API_KEY=YOUR_DASHSCOPE_API_KEY')
from llama_index.multi_modal_llms.dashscope import (
DashScopeMultiModal,
DashScopeMultiModalModels,
)
from llama_index.core.multi_modal_llms.generic_utils import load_image_urls
image_urls = [
"https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg",
]
image_documents = load_image_urls(image_urls)
dashscope_multi_modal_llm = DashScopeMultiModal(
model_name=DashScopeMultiModalModels.QWEN_VL_MAX,
)
complete_response = dashscope_multi_modal_llm.complete(
prompt="What's in the image?",
image_documents=image_documents,
)
print(complete_response)
multi_image_urls = [
"https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg",
"https://dashscope.oss-cn-beijing.aliyuncs.com/images/panda.jpeg",
]
multi_image_documents = load_image_urls(multi_image_urls)
complete_response = dashscope_multi_modal_llm.complete(
prompt="What animals are in the pictures?",
image_documents=multi_image_documents,
)
print(complete_response)
stream_complete_response = dashscope_multi_modal_llm.stream_complete(
prompt="What's in the image?",
image_documents=image_documents,
)
for r in stream_complete_response:
print(r.delta, end="")
from llama_index.core.base.llms.types import MessageRole
from llama_index.multi_modal_llms.dashscope.utils import (
create_dashscope_multi_modal_chat_message,
)
chat_message_user_1 = create_dashscope_multi_modal_chat_message(
"What's in the image?", MessageRole.USER, image_documents
)
chat_response = dashscope_multi_modal_llm.chat([chat_message_user_1])
print(chat_response.message.content[0]["text"])
chat_message_assistent_1 = create_dashscope_multi_modal_chat_message(
chat_response.message.content[0]["text"], MessageRole.ASSISTANT, None
)
chat_message_user_2 = create_dashscope_multi_modal_chat_message(
"what are they doing?", MessageRole.USER, None
)
chat_response = dashscope_multi_modal_llm.chat(
[chat_message_user_1, chat_message_assistent_1, chat_message_user_2]
)
print(chat_response.message.content[0]["text"])
stream_chat_response = dashscope_multi_modal_llm.stream_chat(
[chat_message_user_1, chat_message_assistent_1, chat_message_user_2]
)
for r in stream_chat_response:
print(r.delta, end="")
from llama_index.multi_modal_llms.dashscope.utils import load_local_images
local_images = [
"file://THE_FILE_PATH1",
"file://THE_FILE_PATH2",
]
image_documents = | load_local_images(local_images) | llama_index.multi_modal_llms.dashscope.utils.load_local_images |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from pathlib import Path
import nest_asyncio
nest_asyncio.apply()
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
pipeline = IngestionPipeline(
documents=docs,
transformations=[
HTMLNodeParser.from_defaults(),
SentenceSplitter(chunk_size=1024, chunk_overlap=200),
OpenAIEmbedding(),
],
)
eval_nodes = pipeline.run(documents=docs)
eval_llm = OpenAI(model="gpt-3.5-turbo")
dataset_generator = DatasetGenerator(
eval_nodes[:100],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=3,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100)
len(eval_dataset.qr_pairs)
eval_dataset.save_json("data/tesla10k_eval_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/tesla10k_eval_dataset.json"
)
eval_qs = eval_dataset.questions
qr_pairs = eval_dataset.qr_pairs
ref_response_strs = [r for (_, r) in qr_pairs]
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
)
from llama_index.core.evaluation.eval_utils import (
get_responses,
get_results_df,
)
from llama_index.core.evaluation import BatchEvalRunner
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm)
evaluator_dict = {
"correctness": evaluator_c,
"semantic_similarity": evaluator_s,
}
batch_eval_runner = BatchEvalRunner(
evaluator_dict, workers=2, show_progress=True
)
from llama_index.core import VectorStoreIndex
async def run_evals(
pipeline, batch_eval_runner, docs, eval_qs, eval_responses_ref
):
nodes = pipeline.run(documents=docs)
vector_index = VectorStoreIndex(nodes)
query_engine = vector_index.as_query_engine()
pred_responses = get_responses(eval_qs, query_engine, show_progress=True)
eval_results = await batch_eval_runner.aevaluate_responses(
eval_qs, responses=pred_responses, reference=eval_responses_ref
)
return eval_results
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
sent_parser_o0 = SentenceSplitter(chunk_size=1024, chunk_overlap=0)
sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
sent_parser_o500 = SentenceSplitter(chunk_size=1024, chunk_overlap=600)
html_parser = HTMLNodeParser.from_defaults()
parser_dict = {
"sent_parser_o0": sent_parser_o0,
"sent_parser_o200": sent_parser_o200,
"sent_parser_o500": sent_parser_o500,
}
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
pipeline_dict = {}
for k, parser in parser_dict.items():
pipeline = IngestionPipeline(
documents=docs,
transformations=[
html_parser,
parser,
OpenAIEmbedding(),
],
)
pipeline_dict[k] = pipeline
eval_results_dict = {}
for k, pipeline in pipeline_dict.items():
eval_results = await run_evals(
pipeline, batch_eval_runner, docs, eval_qs, ref_response_strs
)
eval_results_dict[k] = eval_results
import pickle
pickle.dump(eval_results_dict, open("eval_results_1.pkl", "wb"))
eval_results_list = list(eval_results_dict.items())
results_df = get_results_df(
[v for _, v in eval_results_list],
[k for k, _ in eval_results_list],
["correctness", "semantic_similarity"],
)
display(results_df)
for k, pipeline in pipeline_dict.items():
pipeline.cache.persist(f"./cache/{k}.json")
from llama_index.core.extractors import (
TitleExtractor,
QuestionsAnsweredExtractor,
SummaryExtractor,
)
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
extractor_dict = {
"summary": SummaryExtractor(in_place=False),
"qa": QuestionsAnsweredExtractor(in_place=False),
"default": None,
}
html_parser = HTMLNodeParser.from_defaults()
sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
pipeline_dict = {}
html_parser = HTMLNodeParser.from_defaults()
for k, extractor in extractor_dict.items():
if k == "default":
transformations = [
html_parser,
sent_parser_o200,
OpenAIEmbedding(),
]
else:
transformations = [
html_parser,
sent_parser_o200,
extractor,
OpenAIEmbedding(),
]
pipeline = | IngestionPipeline(transformations=transformations) | llama_index.core.ingestion.IngestionPipeline |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-milvus')
get_ipython().system(' pip install llama-index')
import logging
import sys
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Document
from llama_index.vector_stores.milvus import MilvusVectorStore
from IPython.display import Markdown, display
import textwrap
import openai
openai.api_key = "sk-"
get_ipython().system(" mkdir -p 'data/paul_graham/'")
get_ipython().system(" wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print("Document ID:", documents[0].doc_id)
from llama_index.core import StorageContext
vector_store = MilvusVectorStore(dim=1536, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author learn?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What was a hard moment for the author?")
print(textwrap.fill(str(response), 100))
vector_store = MilvusVectorStore(dim=1536, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
[ | Document(text="The number that is being searched for is ten.") | llama_index.core.Document |
get_ipython().run_line_magic('pip', 'install llama-index-readers-database')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from __future__ import absolute_import
import os
os.environ["OPENAI_API_KEY"] = ""
from llama_index.readers.database import DatabaseReader
from llama_index.core import VectorStoreIndex
db = DatabaseReader(
scheme="postgresql", # Database Scheme
host="localhost", # Database Host
port="5432", # Database Port
user="postgres", # Database User
password="FakeExamplePassword", # Database Password
dbname="postgres", # Database Name
)
print(type(db))
print(type(db.load_data))
print(type(db.sql_database))
print(type(db.sql_database.from_uri))
print(type(db.sql_database.get_single_table_info))
print(type(db.sql_database.get_table_columns))
print(type(db.sql_database.get_usable_table_names))
print(type(db.sql_database.insert_into_table))
print(type(db.sql_database.run_sql))
print(type(db.sql_database.dialect))
print(type(db.sql_database.engine))
print(type(db.sql_database))
db_from_sql_database = DatabaseReader(sql_database=db.sql_database)
print(type(db_from_sql_database))
print(type(db.sql_database.engine))
db_from_engine = | DatabaseReader(engine=db.sql_database.engine) | llama_index.readers.database.DatabaseReader |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = | OpenAIEmbedding(model="text-embedding-3-small") | llama_index.embeddings.openai.OpenAIEmbedding |
get_ipython().run_line_magic('pip', 'install llama-index-readers-notion')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
get_ipython().system('pip install llama-index')
from llama_index.core import SummaryIndex
from llama_index.readers.notion import NotionPageReader
from IPython.display import Markdown, display
import os
integration_token = os.getenv("NOTION_INTEGRATION_TOKEN")
page_ids = ["<page_id>"]
documents = | NotionPageReader(integration_token=integration_token) | llama_index.readers.notion.NotionPageReader |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
llm = OpenAI(model="gpt-4")
splitter = SentenceSplitter(chunk_size=1024)
nodes = splitter.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(
nodes=nodes,
storage_context=storage_context,
)
retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
from llama_index.core.response.notebook_utils import display_source_node
nodes = retriever.retrieve("What happened at Viaweb and Interleaf?")
for node in nodes:
display_source_node(node)
nodes = retriever.retrieve("What did Paul Graham do after RISD?")
for node in nodes:
display_source_node(node)
from llama_index.core.tools import RetrieverTool
vector_retriever = VectorIndexRetriever(index)
bm25_retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
retriever_tools = [
RetrieverTool.from_defaults(
retriever=vector_retriever,
description="Useful in most cases",
),
RetrieverTool.from_defaults(
retriever=bm25_retriever,
description="Useful if searching about specific information",
),
]
from llama_index.core.retrievers import RouterRetriever
retriever = RouterRetriever.from_defaults(
retriever_tools=retriever_tools,
llm=llm,
select_multi=True,
)
nodes = retriever.retrieve(
"Can you give me all the context regarding the author's life?"
)
for node in nodes:
display_source_node(node)
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import (
VectorStoreIndex,
StorageContext,
SimpleDirectoryReader,
Document,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
documents = SimpleDirectoryReader(
input_files=["IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
llm = OpenAI(model="gpt-3.5-turbo")
splitter = | SentenceSplitter(chunk_size=256) | llama_index.core.node_parser.SentenceSplitter |
get_ipython().run_line_magic('pip', 'install llama-index-question-gen-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from IPython.display import Markdown, display
def display_prompt_dict(prompts_dict):
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector
from llama_index.core.selectors import (
PydanticMultiSelector,
PydanticSingleSelector,
)
selector = LLMMultiSelector.from_defaults()
from llama_index.core.tools import ToolMetadata
tool_choices = [
ToolMetadata(
name="covid_nyt",
description=("This tool contains a NYT news article about COVID-19"),
),
ToolMetadata(
name="covid_wiki",
description=("This tool contains the Wikipedia page about COVID-19"),
),
ToolMetadata(
name="covid_tesla",
description=("This tool contains the Wikipedia page about apples"),
),
]
display_prompt_dict(selector.get_prompts())
selector_result = selector.select(
tool_choices, query="Tell me more about COVID-19"
)
selector_result.selections
from llama_index.core import PromptTemplate
from llama_index.llms.openai import OpenAI
query_gen_str = """\
You are a helpful assistant that generates multiple search queries based on a \
single input query. Generate {num_queries} search queries, one on each line, \
related to the following input query:
Query: {query}
Queries:
"""
query_gen_prompt = PromptTemplate(query_gen_str)
llm = OpenAI(model="gpt-3.5-turbo")
def generate_queries(query: str, llm, num_queries: int = 4):
response = llm.predict(
query_gen_prompt, num_queries=num_queries, query=query
)
queries = response.split("\n")
queries_str = "\n".join(queries)
print(f"Generated queries:\n{queries_str}")
return queries
queries = generate_queries("What happened at Interleaf and Viaweb?", llm)
queries
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.llms.openai import OpenAI
hyde = HyDEQueryTransform(include_original=True)
llm = OpenAI(model="gpt-3.5-turbo")
query_bundle = hyde.run("What is Bel?")
new_query.custom_embedding_strs
from llama_index.core.question_gen import LLMQuestionGenerator
from llama_index.question_gen.openai import OpenAIQuestionGenerator
from llama_index.llms.openai import OpenAI
llm = OpenAI()
question_gen = OpenAIQuestionGenerator.from_defaults(llm=llm)
display_prompt_dict(question_gen.get_prompts())
from llama_index.core.tools import ToolMetadata
tool_choices = [
ToolMetadata(
name="uber_2021_10k",
description=(
"Provides information about Uber financials for year 2021"
),
),
ToolMetadata(
name="lyft_2021_10k",
description=(
"Provides information about Lyft financials for year 2021"
),
),
]
from llama_index.core import QueryBundle
query_str = "Compare and contrast Uber and Lyft"
choices = question_gen.generate(tool_choices, QueryBundle(query_str=query_str))
choices
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.tools import FunctionTool
from llama_index.core.llms import ChatMessage
def execute_sql(sql: str) -> str:
"""Given a SQL input string, execute it."""
return f"Executed {sql}"
def add(a: int, b: int) -> int:
"""Add two numbers."""
return a + b
tool1 = FunctionTool.from_defaults(fn=execute_sql)
tool2 = FunctionTool.from_defaults(fn=add)
tools = [tool1, tool2]
chat_formatter = ReActChatFormatter()
output_parser = | ReActOutputParser() | llama_index.core.agent.react.output_parser.ReActOutputParser |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
MONGO_URI = os.environ["MONGO_URI"]
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.index_store.mongodb import MongoIndexStore
storage_context = StorageContext.from_defaults(
docstore=MongoDocumentStore.from_uri(uri=MONGO_URI),
index_store=MongoIndexStore.from_uri(uri=MONGO_URI),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = | VectorStoreIndex(nodes, storage_context=storage_context) | llama_index.core.VectorStoreIndex |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-tables-chain-of-table-base')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/200-csv/3.csv")
df
from llama_index.packs.tables.chain_of_table.base import (
ChainOfTableQueryEngine,
serialize_table,
)
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"ChainOfTablePack",
"./chain_of_table_pack",
skip_load=True,
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-1106-preview")
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
import pandas as pd
df = pd.read_csv("~/Downloads/WikiTableQuestions/csv/200-csv/11.csv")
df
query_engine = | ChainOfTableQueryEngine(df, llm=llm, verbose=True) | llama_index.packs.tables.chain_of_table.base.ChainOfTableQueryEngine |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
get_ipython().system('pip install llama_hub')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.readers.file import UnstructuredReader
from llama_index.readers.file import PyMuPDFReader
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
index = VectorStoreIndex(base_nodes)
query_engine = index.as_query_engine(similarity_top_k=2)
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.core.node_parser import SimpleNodeParser
dataset_generator = DatasetGenerator(
base_nodes[:20],
llm=OpenAI(model="gpt-4"),
show_progress=True,
num_questions_per_chunk=3,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60)
eval_dataset.save_json("data/llama2_eval_qr_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
import random
full_qr_pairs = eval_dataset.qr_pairs
num_exemplars = 2
num_eval = 40
exemplar_qr_pairs = random.sample(full_qr_pairs, num_exemplars)
eval_qr_pairs = random.sample(full_qr_pairs, num_eval)
len(exemplar_qr_pairs)
from llama_index.core.evaluation.eval_utils import get_responses
from llama_index.core.evaluation import CorrectnessEvaluator, BatchEvalRunner
evaluator_c = CorrectnessEvaluator(llm=OpenAI(model="gpt-3.5-turbo"))
evaluator_dict = {
"correctness": evaluator_c,
}
batch_runner = BatchEvalRunner(evaluator_dict, workers=2, show_progress=True)
async def get_correctness(query_engine, eval_qa_pairs, batch_runner):
eval_qs = [q for q, _ in eval_qa_pairs]
eval_answers = [a for _, a in eval_qa_pairs]
pred_responses = | get_responses(eval_qs, query_engine, show_progress=True) | llama_index.core.evaluation.eval_utils.get_responses |
get_ipython().system('pip install llama-index')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.query_engine import SubQuestionQueryEngine
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler
from llama_index.core import Settings
llama_debug = | LlamaDebugHandler(print_trace_on_end=True) | llama_index.core.callbacks.LlamaDebugHandler |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
llm = OpenAI(model="gpt-4")
splitter = SentenceSplitter(chunk_size=1024)
nodes = splitter.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(
nodes=nodes,
storage_context=storage_context,
)
retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
from llama_index.core.response.notebook_utils import display_source_node
nodes = retriever.retrieve("What happened at Viaweb and Interleaf?")
for node in nodes:
display_source_node(node)
nodes = retriever.retrieve("What did Paul Graham do after RISD?")
for node in nodes:
| display_source_node(node) | llama_index.core.response.notebook_utils.display_source_node |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from IPython.display import Markdown, display
import chromadb
import os
import getpass
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = | ChromaVectorStore(chroma_collection=chroma_collection) | llama_index.vector_stores.chroma.ChromaVectorStore |
get_ipython().run_line_magic('pip', 'install llama-index-readers-chroma')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.readers.chroma import ChromaReader
reader = ChromaReader(
collection_name="chroma_collection",
persist_directory="examples/data_connectors/chroma_collection",
)
query_vector = [n1, n2, n3, ...]
documents = reader.load_data(
collection_name="demo", query_vector=query_vector, limit=5
)
from llama_index.core import SummaryIndex
index = | SummaryIndex.from_documents(documents) | llama_index.core.SummaryIndex.from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().system('pip install -q llama-index google-generativeai')
get_ipython().run_line_magic('env', 'GOOGLE_API_KEY=...')
import os
GOOGLE_API_KEY = "" # add your GOOGLE API key here
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from llama_index.llms.gemini import Gemini
resp = Gemini().complete("Write a poem about a magic backpack")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.gemini import Gemini
messages = [
ChatMessage(role="user", content="Hello friend!"),
ChatMessage(role="assistant", content="Yarr what is shakin' matey?"),
ChatMessage(
role="user", content="Help me decide what to have for dinner."
),
]
resp = Gemini().chat(messages)
print(resp)
from llama_index.llms.gemini import Gemini
llm = | Gemini() | llama_index.llms.gemini.Gemini |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import SimpleDirectoryReader, get_response_synthesizer
from llama_index.core import DocumentSummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.core.node_parser import SentenceSplitter
wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"]
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
city_docs = []
for wiki_title in wiki_titles:
docs = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()
docs[0].doc_id = wiki_title
city_docs.extend(docs)
chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo")
splitter = SentenceSplitter(chunk_size=1024)
response_synthesizer = get_response_synthesizer(
response_mode="tree_summarize", use_async=True
)
doc_summary_index = DocumentSummaryIndex.from_documents(
city_docs,
llm=chatgpt,
transformations=[splitter],
response_synthesizer=response_synthesizer,
show_progress=True,
)
doc_summary_index.get_document_summary("Boston")
doc_summary_index.storage_context.persist("index")
from llama_index.core import load_index_from_storage
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults(persist_dir="index")
doc_summary_index = load_index_from_storage(storage_context)
query_engine = doc_summary_index.as_query_engine(
response_mode="tree_summarize", use_async=True
)
response = query_engine.query("What are the sports teams in Toronto?")
print(response)
from llama_index.core.indices.document_summary import (
DocumentSummaryIndexLLMRetriever,
)
retriever = DocumentSummaryIndexLLMRetriever(
doc_summary_index,
)
retrieved_nodes = retriever.retrieve("What are the sports teams in Toronto?")
print(len(retrieved_nodes))
print(retrieved_nodes[0].score)
print(retrieved_nodes[0].node.get_text())
from llama_index.core.query_engine import RetrieverQueryEngine
response_synthesizer = get_response_synthesizer(response_mode="tree_summarize")
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
)
response = query_engine.query("What are the sports teams in Toronto?")
print(response)
from llama_index.core.indices.document_summary import (
DocumentSummaryIndexEmbeddingRetriever,
)
retriever = DocumentSummaryIndexEmbeddingRetriever(
doc_summary_index,
)
retrieved_nodes = retriever.retrieve("What are the sports teams in Toronto?")
len(retrieved_nodes)
print(retrieved_nodes[0].node.get_text())
from llama_index.core.query_engine import RetrieverQueryEngine
response_synthesizer = | get_response_synthesizer(response_mode="tree_summarize") | llama_index.core.get_response_synthesizer |
from llama_index.agent import OpenAIAgent
import openai
openai.api_key = "sk-your-key"
from llama_index.tools.wikipedia.base import WikipediaToolSpec
from llama_index.tools.tool_spec.load_and_search.base import LoadAndSearchToolSpec
wiki_spec = WikipediaToolSpec()
tool = wiki_spec.to_tool_list()[1]
agent = OpenAIAgent.from_tools(
| LoadAndSearchToolSpec.from_defaults(tool) | llama_index.tools.tool_spec.load_and_search.base.LoadAndSearchToolSpec.from_defaults |
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix
from typing import Iterable
from random import randrange
LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = f"llama-index-colab"
SESSION_CORPUS_ID_PREFIX = (
f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}"
)
def corpus_id(num_id: int) -> str:
return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}"
SESSION_CORPUS_ID = corpus_id(1)
def list_corpora() -> Iterable[genaix.Corpus]:
client = genaix.build_semantic_retriever()
yield from genaix.list_corpora(client=client)
def delete_corpus(*, corpus_id: str) -> None:
client = | genaix.build_semantic_retriever() | llama_index.core.vector_stores.google.generativeai.genai_extension.build_semantic_retriever |
import openai
openai.api_key = "sk-your-api-key"
from llama_index.agent import OpenAIAgent
import requests
import yaml
f = requests.get(
"https://raw.githubusercontent.com/APIs-guru/openapi-directory/main/APIs/openai.com/1.2.0/openapi.yaml"
).text
open_api_spec = yaml.safe_load(f)
from llama_index.tools.openapi.base import OpenAPIToolSpec
from llama_index.tools.requests.base import RequestsToolSpec
from llama_index.tools.tool_spec.load_and_search.base import LoadAndSearchToolSpec
open_spec = OpenAPIToolSpec(open_api_spec)
open_spec = | OpenAPIToolSpec(
url="https://raw.githubusercontent.com/APIs-guru/openapi-directory/main/APIs/openai.com/1.2.0/openapi.yaml"
) | llama_index.tools.openapi.base.OpenAPIToolSpec |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.2)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("../data/paul_graham").load_data()
from llama_index.core import Settings
Settings.chunk_size = 1024
nodes = Settings.node_parser.get_nodes_from_documents(documents)
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
from llama_index.core import SummaryIndex
from llama_index.core import VectorStoreIndex
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
list_query_engine = summary_index.as_query_engine(
response_mode="tree_summarize",
use_async=True,
)
vector_query_engine = vector_index.as_query_engine()
from llama_index.core.tools import QueryEngineTool
list_tool = QueryEngineTool.from_defaults(
query_engine=list_query_engine,
description=(
"Useful for summarization questions related to Paul Graham eassy on"
" What I Worked On."
),
)
vector_tool = QueryEngineTool.from_defaults(
query_engine=vector_query_engine,
description=(
"Useful for retrieving specific context from Paul Graham essay on What"
" I Worked On."
),
)
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector
from llama_index.core.selectors import (
PydanticMultiSelector,
PydanticSingleSelector,
)
query_engine = RouterQueryEngine(
selector=PydanticSingleSelector.from_defaults(),
query_engine_tools=[
list_tool,
vector_tool,
],
)
response = query_engine.query("What is the summary of the document?")
print(str(response))
response = query_engine.query("What did Paul Graham do after RICS?")
print(str(response))
query_engine = RouterQueryEngine(
selector=LLMSingleSelector.from_defaults(),
query_engine_tools=[
list_tool,
vector_tool,
],
)
response = query_engine.query("What is the summary of the document?")
print(str(response))
response = query_engine.query("What did Paul Graham do after RICS?")
print(str(response))
print(str(response.metadata["selector_result"]))
from llama_index.core import SimpleKeywordTableIndex
keyword_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)
keyword_tool = QueryEngineTool.from_defaults(
query_engine=vector_query_engine,
description=(
"Useful for retrieving specific context using keywords from Paul"
" Graham essay on What I Worked On."
),
)
query_engine = RouterQueryEngine(
selector= | PydanticMultiSelector.from_defaults() | llama_index.core.selectors.PydanticMultiSelector.from_defaults |
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.azure_cv.base import AzureCVToolSpec
cv_tool = | AzureCVToolSpec(api_key="your-key", resource="your-resource") | llama_index.tools.azure_cv.base.AzureCVToolSpec |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import BaseTool, FunctionTool
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
llm = OpenAI(model="gpt-3.5-turbo-1106")
agent = OpenAIAgent.from_tools(
[multiply_tool, add_tool], llm=llm, verbose=True
)
response = agent.chat("What is (121 * 3) + 42?")
print(str(response))
response = agent.stream_chat("What is (121 * 3) + 42?")
import nest_asyncio
nest_asyncio.apply()
response = await agent.achat("What is (121 * 3) + 42?")
print(str(response))
response = await agent.astream_chat("What is (121 * 3) + 42?")
response_gen = response.response_gen
async for token in response.async_response_gen():
print(token, end="")
import json
def get_current_weather(location, unit="fahrenheit"):
"""Get the current weather in a given location"""
if "tokyo" in location.lower():
return json.dumps(
{"location": location, "temperature": "10", "unit": "celsius"}
)
elif "san francisco" in location.lower():
return json.dumps(
{"location": location, "temperature": "72", "unit": "fahrenheit"}
)
else:
return json.dumps(
{"location": location, "temperature": "22", "unit": "celsius"}
)
weather_tool = FunctionTool.from_defaults(fn=get_current_weather)
llm = OpenAI(model="gpt-3.5-turbo-1106")
agent = OpenAIAgent.from_tools([weather_tool], llm=llm, verbose=True)
response = agent.chat(
"What's the weather like in San Francisco, Tokyo, and Paris?"
)
llm = OpenAI(model="gpt-3.5-turbo-0613")
agent = | OpenAIAgent.from_tools([weather_tool], llm=llm, verbose=True) | llama_index.agent.openai.OpenAIAgent.from_tools |
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
from llama_index import download_loader
from base import DocugamiReader
docset_id = "ecxqpipcoe2p"
document_ids = ["43rj0ds7s0ur", "bpc1vibyeke2"]
loader = DocugamiReader()
documents = loader.load_data(docset_id=docset_id, document_ids=document_ids)
from llama_index import VectorStoreIndex
docset_id = "wh2kned25uqm"
documents = loader.load_data(docset_id=docset_id)
for d in documents:
stripped_metadata = d.metadata.copy()
for key in d.metadata:
if key not in ["name", "xpath", "id", "structure"]:
del stripped_metadata[key]
d.metadata = stripped_metadata
documents
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(similarity_top_k=5)
response = query_engine.query("What can tenants do with signage on their properties?")
print(response.response)
for node in response.source_nodes:
print(node)
response = query_engine.query(
"What is the security deposit for the property owned by Birch Street?"
)
print(response.response) # the correct answer should be $78,000
for node in response.source_nodes:
print(node.node.extra_info["name"])
print(node.node.text)
docset_id = "wh2kned25uqm"
documents = loader.load_data(docset_id=docset_id)
documents[0].metadata
index = | VectorStoreIndex.from_documents(documents) | llama_index.VectorStoreIndex.from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-longllmlingua')
get_ipython().system('pip install llmlingua llama-index')
import openai
openai.api_key = "<insert_openai_key>"
get_ipython().system('wget "https://www.dropbox.com/s/f6bmb19xdg0xedm/paul_graham_essay.txt?dl=1" -O paul_graham_essay.txt')
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
)
documents = SimpleDirectoryReader(
input_files=["paul_graham_essay.txt"]
).load_data()
index = VectorStoreIndex.from_documents(documents)
retriever = index.as_retriever(similarity_top_k=2)
query_str = "Where did the author go for art school?"
results = retriever.retrieve(query_str)
print(results)
results
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.response_synthesizers import CompactAndRefine
from llama_index.postprocessor.longllmlingua import LongLLMLinguaPostprocessor
node_postprocessor = LongLLMLinguaPostprocessor(
instruction_str="Given the context, please answer the final question",
target_token=300,
rank_method="longllmlingua",
additional_compress_kwargs={
"condition_compare": True,
"condition_in_question": "after",
"context_budget": "+100",
"reorder_context": "sort", # enable document reorder
},
)
retrieved_nodes = retriever.retrieve(query_str)
synthesizer = | CompactAndRefine() | llama_index.core.response_synthesizers.CompactAndRefine |
get_ipython().run_line_magic('pip', 'install llama-index-packs-node-parser-semantic-chunking')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-node-parser-semantic-chunking-base')
from llama_index.core import SimpleDirectoryReader
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'pg_essay.txt'")
documents = SimpleDirectoryReader(input_files=["pg_essay.txt"]).load_data()
from llama_index.packs.node_parser_semantic_chunking.base import SemanticChunker
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"SemanticChunkingQueryEnginePack",
"./semantic_chunking_pack",
skip_load=True,
)
from semantic_chunking_pack.base import SemanticChunker
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
embed_model = OpenAIEmbedding()
splitter = SemanticChunker(
buffer_size=1, breakpoint_percentile_threshold=95, embed_model=embed_model
)
base_splitter = SentenceSplitter(chunk_size=512)
nodes = splitter.get_nodes_from_documents(documents)
print(nodes[1].get_content())
print(nodes[2].get_content())
print(nodes[3].get_content())
base_nodes = base_splitter.get_nodes_from_documents(documents)
print(base_nodes[2].get_content())
from llama_index.core import VectorStoreIndex
from llama_index.core.response.notebook_utils import display_source_node
vector_index = VectorStoreIndex(nodes)
query_engine = vector_index.as_query_engine()
base_vector_index = VectorStoreIndex(base_nodes)
base_query_engine = base_vector_index.as_query_engine()
response = query_engine.query(
"Tell me about the author's programming journey through childhood to college"
)
print(str(response))
for n in response.source_nodes:
display_source_node(n, source_length=20000)
base_response = base_query_engine.query(
"Tell me about the author's programming journey through childhood to college"
)
print(str(base_response))
for n in base_response.source_nodes:
display_source_node(n, source_length=20000)
response = query_engine.query("Tell me about the author's experience in YC")
print(str(response))
base_response = base_query_engine.query("Tell me about the author's experience in YC")
print(str(base_response))
from llama_index.packs.node_parser_semantic_chunking import (
SemanticChunkingQueryEnginePack,
)
from llama_index.core.llama_pack import download_llama_pack
pack = | SemanticChunkingQueryEnginePack(documents) | llama_index.packs.node_parser_semantic_chunking.SemanticChunkingQueryEnginePack |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-supabase')
get_ipython().system('pip install llama-index')
from llama_index.agent.openai import OpenAIAssistantAgent
agent = OpenAIAssistantAgent.from_new(
name="Math Tutor",
instructions="You are a personal math tutor. Write and run code to answer math questions.",
openai_tools=[{"type": "code_interpreter"}],
instructions_prefix="Please address the user as Jane Doe. The user has a premium account.",
)
agent.thread_id
response = agent.chat(
"I need to solve the equation `3x + 11 = 14`. Can you help me?"
)
print(str(response))
from llama_index.agent.openai import OpenAIAssistantAgent
agent = OpenAIAssistantAgent.from_new(
name="SEC Analyst",
instructions="You are a QA assistant designed to analyze sec filings.",
openai_tools=[{"type": "retrieval"}],
instructions_prefix="Please address the user as Jerry.",
files=["data/10k/lyft_2021.pdf"],
verbose=True,
)
response = agent.chat("What was Lyft's revenue growth in 2021?")
print(str(response))
from llama_index.agent.openai import OpenAIAssistantAgent
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.core.tools import QueryEngineTool, ToolMetadata
try:
storage_context = StorageContext.from_defaults(
persist_dir="./storage/lyft"
)
lyft_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/uber"
)
uber_index = load_index_from_storage(storage_context)
index_loaded = True
except:
index_loaded = False
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
if not index_loaded:
lyft_docs = SimpleDirectoryReader(
input_files=["./data/10k/lyft_2021.pdf"]
).load_data()
uber_docs = SimpleDirectoryReader(
input_files=["./data/10k/uber_2021.pdf"]
).load_data()
lyft_index = VectorStoreIndex.from_documents(lyft_docs)
uber_index = VectorStoreIndex.from_documents(uber_docs)
lyft_index.storage_context.persist(persist_dir="./storage/lyft")
uber_index.storage_context.persist(persist_dir="./storage/uber")
lyft_engine = lyft_index.as_query_engine(similarity_top_k=3)
uber_engine = uber_index.as_query_engine(similarity_top_k=3)
query_engine_tools = [
QueryEngineTool(
query_engine=lyft_engine,
metadata=ToolMetadata(
name="lyft_10k",
description=(
"Provides information about Lyft financials for year 2021. "
"Use a detailed plain text question as input to the tool."
),
),
),
QueryEngineTool(
query_engine=uber_engine,
metadata=ToolMetadata(
name="uber_10k",
description=(
"Provides information about Uber financials for year 2021. "
"Use a detailed plain text question as input to the tool."
),
),
),
]
agent = OpenAIAssistantAgent.from_new(
name="SEC Analyst",
instructions="You are a QA assistant designed to analyze sec filings.",
tools=query_engine_tools,
instructions_prefix="Please address the user as Jerry.",
verbose=True,
run_retrieve_sleep_time=1.0,
)
response = agent.chat("What was Lyft's revenue growth in 2021?")
from llama_index.agent.openai import OpenAIAssistantAgent
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
)
from llama_index.vector_stores.supabase import SupabaseVectorStore
from llama_index.core.tools import QueryEngineTool, ToolMetadata
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
reader = SimpleDirectoryReader(input_files=["./data/10k/lyft_2021.pdf"])
docs = reader.load_data()
for doc in docs:
doc.id_ = "lyft_docs"
vector_store = | SupabaseVectorStore(
postgres_connection_string=(
"postgresql://<user>:<password>@<host>:<port>/<db_name>"
) | llama_index.vector_stores.supabase.SupabaseVectorStore |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = | FlatReader() | llama_index.readers.file.FlatReader |
from llama_index import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader(
"../../examples/data/paul_graham"
).load_data()
index = VectorStoreIndex.from_documents(documents)
import pinecone
from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.vector_stores import PineconeVectorStore
pinecone.init(api_key="<api_key>", environment="<environment>")
pinecone.create_index(
"quickstart", dimension=1536, metric="euclidean", pod_type="p1"
)
storage_context = StorageContext.from_defaults(
vector_store=PineconeVectorStore(pinecone.Index("quickstart"))
)
documents = SimpleDirectoryReader(
"../../examples/data/paul_graham"
).load_data()
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
vector_store = PineconeVectorStore(pinecone.Index("quickstart"))
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters
query_engine = index.as_query_engine(
similarity_top_k=3,
vector_store_query_mode="default",
filters=MetadataFilters(
filters=[
| ExactMatchFilter(key="name", value="paul graham") | llama_index.vector_stores.types.ExactMatchFilter |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=')
get_ipython().run_line_magic('env', 'BRAINTRUST_API_KEY=')
get_ipython().run_line_magic('env', 'TOKENIZERS_PARALLELISM=true # This is needed to avoid a warning message from Chroma')
get_ipython().run_line_magic('pip', 'install -U llama_hub llama_index braintrust autoevals pypdf pillow transformers torch torchvision')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)
retrievals = base_retriever.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for n in retrievals:
display_source_node(n, source_length=1500)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)
response = query_engine_base.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
sub_chunk_sizes = [128, 256, 512]
sub_node_parsers = [SentenceSplitter(chunk_size=c) for c in sub_chunk_sizes]
all_nodes = []
for base_node in base_nodes:
for n in sub_node_parsers:
sub_nodes = n.get_nodes_from_documents([base_node])
sub_inodes = [
IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes
]
all_nodes.extend(sub_inodes)
original_node = IndexNode.from_text_node(base_node, base_node.node_id)
all_nodes.append(original_node)
all_nodes_dict = {n.node_id: n for n in all_nodes}
vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model)
vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2)
retriever_chunk = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_chunk},
node_dict=all_nodes_dict,
verbose=True,
)
nodes = retriever_chunk.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for node in nodes:
display_source_node(node, source_length=2000)
query_engine_chunk = RetrieverQueryEngine.from_args(retriever_chunk, llm=llm)
response = query_engine_chunk.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
from llama_index.core.extractors import (
SummaryExtractor,
QuestionsAnsweredExtractor,
)
extractors = [
SummaryExtractor(summaries=["self"], show_progress=True),
| QuestionsAnsweredExtractor(questions=5, show_progress=True) | llama_index.core.extractors.QuestionsAnsweredExtractor |
get_ipython().run_line_magic('pip', 'install llama-index-callbacks-aim')
get_ipython().system('pip install llama-index')
from llama_index.core.callbacks import CallbackManager
from llama_index.callbacks.aim import AimCallback
from llama_index.core import SummaryIndex
from llama_index.core import SimpleDirectoryReader
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
docs = SimpleDirectoryReader("./data/paul_graham").load_data()
aim_callback = AimCallback(repo="./")
callback_manager = | CallbackManager([aim_callback]) | llama_index.core.callbacks.CallbackManager |
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-rankgpt-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-packs-infer-retrieve-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import datasets
dataset = datasets.load_dataset("BioDEX/BioDEX-ICSR")
dataset
from llama_index.core import get_tokenizer
import re
from typing import Set, List
tokenizer = | get_tokenizer() | llama_index.core.get_tokenizer |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core.schema import TextNode
from llama_index.core.indices.managed.types import ManagedIndexQueryMode
from llama_index.indices.managed.vectara import VectaraIndex
from llama_index.indices.managed.vectara import VectaraAutoRetriever
from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
from llama_index.llms.openai import OpenAI
nodes = [
TextNode(
text=(
"A pragmatic paleontologist touring an almost complete theme park on an island "
+ "in Central America is tasked with protecting a couple of kids after a power "
+ "failure causes the park's cloned dinosaurs to run loose."
),
metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"},
),
TextNode(
text=(
"A thief who steals corporate secrets through the use of dream-sharing technology "
+ "is given the inverse task of planting an idea into the mind of a C.E.O., "
+ "but his tragic past may doom the project and his team to disaster."
),
metadata={
"year": 2010,
"director": "Christopher Nolan",
"rating": 8.2,
},
),
TextNode(
text="Barbie suffers a crisis that leads her to question her world and her existence.",
metadata={
"year": 2023,
"director": "Greta Gerwig",
"genre": "fantasy",
"rating": 9.5,
},
),
TextNode(
text=(
"A cowboy doll is profoundly threatened and jealous when a new spaceman action "
+ "figure supplants him as top toy in a boy's bedroom."
),
metadata={"year": 1995, "genre": "animated", "rating": 8.3},
),
TextNode(
text=(
"When Woody is stolen by a toy collector, Buzz and his friends set out on a "
+ "rescue mission to save Woody before he becomes a museum toy property with his "
+ "roundup gang Jessie, Prospector, and Bullseye. "
),
metadata={"year": 1999, "genre": "animated", "rating": 7.9},
),
| TextNode(
text=(
"The toys are mistakenly delivered to a day-care center instead of the attic "
+ "right before Andy leaves for college, and it's up to Woody to convince the "
+ "other toys that they weren't abandoned and to return home."
) | llama_index.core.schema.TextNode |
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-rankgpt-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-packs-infer-retrieve-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import datasets
dataset = datasets.load_dataset("BioDEX/BioDEX-ICSR")
dataset
from llama_index.core import get_tokenizer
import re
from typing import Set, List
tokenizer = get_tokenizer()
sample_size = 5
def get_reactions_row(raw_target: str) -> List[str]:
"""Get reactions from a single row."""
reaction_pattern = re.compile(r"reactions:\s*(.*)")
reaction_match = reaction_pattern.search(raw_target)
if reaction_match:
reactions = reaction_match.group(1).split(",")
reactions = [r.strip().lower() for r in reactions]
else:
reactions = []
return reactions
def get_reactions_set(dataset) -> Set[str]:
"""Get set of all reactions."""
reactions = set()
for data in dataset["train"]:
reactions.update(set(get_reactions_row(data["target"])))
return reactions
def get_samples(dataset, sample_size: int = 5):
"""Get processed sample.
Contains source text and also the reaction label.
Parse reaction text to specifically extract reactions.
"""
samples = []
for idx, data in enumerate(dataset["train"]):
if idx >= sample_size:
break
text = data["fulltext_processed"]
raw_target = data["target"]
reactions = get_reactions_row(raw_target)
samples.append({"text": text, "reactions": reactions})
return samples
from llama_index.packs.infer_retrieve_rerank import InferRetrieveRerankPack
from llama_index.core.llama_pack import download_llama_pack
InferRetrieveRerankPack = download_llama_pack(
"InferRetrieveRerankPack",
"./irr_pack",
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-16k")
pred_context = """\
The output predictins should be a list of comma-separated adverse \
drug reactions. \
"""
reranker_top_n = 10
pack = InferRetrieveRerankPack(
get_reactions_set(dataset),
llm=llm,
pred_context=pred_context,
reranker_top_n=reranker_top_n,
verbose=True,
)
samples = get_samples(dataset, sample_size=5)
pred_reactions = pack.run(inputs=[s["text"] for s in samples])
gt_reactions = [s["reactions"] for s in samples]
pred_reactions[2]
gt_reactions[2]
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.llms import LLM
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.postprocessor.rankgpt_rerank import RankGPTRerank
from llama_index.core.output_parsers import ChainableOutputParser
from typing import List
import random
all_reactions = get_reactions_set(dataset)
random.sample(all_reactions, 5)
from llama_index.core.schema import TextNode
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core import VectorStoreIndex
reaction_nodes = [ | TextNode(text=r) | llama_index.core.schema.TextNode |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-lancedb')
get_ipython().system('pip install llama-index')
import logging
import sys
from llama_index.core import SimpleDirectoryReader, Document, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.lancedb import LanceDBVectorStore
import textwrap
import openai
openai.api_key = ""
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print("Document ID:", documents[0].doc_id, "Document Hash:", documents[0].hash)
vector_store = | LanceDBVectorStore(uri="/tmp/lancedb") | llama_index.vector_stores.lancedb.LanceDBVectorStore |
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-agents-llm-compiler-step')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
import nest_asyncio
nest_asyncio.apply()
from llama_index.packs.agents.llm_compiler.step import LLMCompilerAgentWorker
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"LLMCompilerAgentPack",
"./agent_pack",
skip_load=True,
)
from agent_pack.step import LLMCompilerAgentWorker
import json
from typing import Sequence, List
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
import nest_asyncio
nest_asyncio.apply()
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
tools = [multiply_tool, add_tool]
multiply_tool.metadata.fn_schema_str
from llama_index.core.agent import AgentRunner
llm = OpenAI(model="gpt-4")
callback_manager = llm.callback_manager
agent_worker = LLMCompilerAgentWorker.from_tools(
tools, llm=llm, verbose=True, callback_manager=callback_manager
)
agent = | AgentRunner(agent_worker, callback_manager=callback_manager) | llama_index.core.agent.AgentRunner |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('mkdir -p data')
get_ipython().system('echo "This is a test file: one!" > data/test1.txt')
get_ipython().system('echo "This is a test file: two!" > data/test2.txt')
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data", filename_as_id=True).load_data()
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.core.node_parser import SentenceSplitter
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(),
HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5"),
],
docstore=SimpleDocumentStore(),
)
nodes = pipeline.run(documents=documents)
print(f"Ingested {len(nodes)} Nodes")
pipeline.persist("./pipeline_storage")
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(),
| HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5") | llama_index.embeddings.huggingface.HuggingFaceEmbedding |
get_ipython().run_line_magic('pip', 'install llama-index-llms-llama-cpp')
get_ipython().system('pip install llama-index lm-format-enforcer llama-cpp-python')
import lmformatenforcer
import re
from llama_index.core.prompts.lmformatenforcer_utils import (
activate_lm_format_enforcer,
build_lm_format_enforcer_function,
)
regex = r'"Hello, my name is (?P<name>[a-zA-Z]*)\. I was born in (?P<hometown>[a-zA-Z]*). Nice to meet you!"'
from llama_index.llms.llama_cpp import LlamaCPP
llm = LlamaCPP()
regex_parser = lmformatenforcer.RegexParser(regex)
lm_format_enforcer_fn = | build_lm_format_enforcer_function(llm, regex_parser) | llama_index.core.prompts.lmformatenforcer_utils.build_lm_format_enforcer_function |
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface')
import nest_asyncio
nest_asyncio.apply()
import os
HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
get_ipython().system('pip install wikipedia -q')
from llama_index.readers.wikipedia import WikipediaReader
cities = [
"San Francisco",
"Toronto",
"New York",
"Vancouver",
"Montreal",
"Tokyo",
"Singapore",
"Paris",
]
documents = WikipediaReader().load_data(
pages=[f"History of {x}" for x in cities]
)
QUESTION_GEN_PROMPT = (
"You are a Teacher/ Professor. Your task is to setup "
"a quiz/examination. Using the provided context, formulate "
"a single question that captures an important fact from the "
"context. Restrict the question to the context information provided."
)
from llama_index.core.evaluation import DatasetGenerator
from llama_index.llms.openai import OpenAI
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
dataset_generator = DatasetGenerator.from_documents(
documents,
question_gen_query=QUESTION_GEN_PROMPT,
llm=gpt_35_llm,
num_questions_per_chunk=25,
)
qrd = dataset_generator.generate_dataset_from_nodes(num=350)
from llama_index.core import VectorStoreIndex
from llama_index.core.retrievers import VectorIndexRetriever
the_index = VectorStoreIndex.from_documents(documents=documents)
the_retriever = VectorIndexRetriever(
index=the_index,
similarity_top_k=2,
)
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
llm = HuggingFaceInferenceAPI(
model_name="meta-llama/Llama-2-7b-chat-hf",
context_window=2048, # to use refine
token=HUGGING_FACE_TOKEN,
)
query_engine = RetrieverQueryEngine.from_args(retriever=the_retriever, llm=llm)
import tqdm
train_dataset = []
num_train_questions = int(0.65 * len(qrd.qr_pairs))
for q, a in tqdm.tqdm(qrd.qr_pairs[:num_train_questions]):
data_entry = {"question": q, "reference": a}
response = query_engine.query(q)
response_struct = {}
response_struct["model"] = "llama-2"
response_struct["text"] = str(response)
response_struct["context"] = (
response.source_nodes[0].node.text[:1000] + "..."
)
data_entry["response_data"] = response_struct
train_dataset.append(data_entry)
from llama_index.llms.openai import OpenAI
from llama_index.finetuning.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from llama_index.core.evaluation import CorrectnessEvaluator
finetuning_handler = OpenAIFineTuningHandler()
callback_manager = | CallbackManager([finetuning_handler]) | llama_index.core.callbacks.CallbackManager |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
from llama_index.core.llama_dataset import (
LabelledRagDataExample,
CreatedByType,
CreatedBy,
)
query = "This is a test query, is it not?"
query_by = CreatedBy(type=CreatedByType.AI, model_name="gpt-4")
reference_answer = "Yes it is."
reference_answer_by = CreatedBy(type=CreatedByType.HUMAN)
reference_contexts = ["This is a sample context"]
rag_example = LabelledRagDataExample(
query=query,
query_by=query_by,
reference_contexts=reference_contexts,
reference_answer=reference_answer,
reference_answer_by=reference_answer_by,
)
print(rag_example.json())
LabelledRagDataExample.parse_raw(rag_example.json())
rag_example.dict()
LabelledRagDataExample.parse_obj(rag_example.dict())
query = "This is a test query, is it so?"
reference_answer = "I think yes, it is."
reference_contexts = ["This is a second sample context"]
rag_example_2 = LabelledRagDataExample(
query=query,
query_by=query_by,
reference_contexts=reference_contexts,
reference_answer=reference_answer,
reference_answer_by=reference_answer_by,
)
from llama_index.core.llama_dataset import LabelledRagDataset
rag_dataset = LabelledRagDataset(examples=[rag_example, rag_example_2])
rag_dataset.to_pandas()
rag_dataset.save_json("rag_dataset.json")
reload_rag_dataset = LabelledRagDataset.from_json("rag_dataset.json")
reload_rag_dataset.to_pandas()
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('pip install wikipedia -q')
from llama_index.readers.wikipedia import WikipediaReader
from llama_index.core import VectorStoreIndex
cities = [
"San Francisco",
]
documents = | WikipediaReader() | llama_index.readers.wikipedia.WikipediaReader |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import SimpleDirectoryReader, Document
from llama_index.core import SummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.llms.anthropic import Anthropic
from llama_index.core.evaluation import CorrectnessEvaluator
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
uber_docs0 = SimpleDirectoryReader(
input_files=["./data/10k/uber_2021.pdf"]
).load_data()
uber_doc = Document(text="\n\n".join([d.get_content() for d in uber_docs0]))
from llama_index.core.utils import globals_helper
num_tokens = len(globals_helper.tokenizer(uber_doc.get_content()))
print(f"NUM TOKENS: {num_tokens}")
context_str = "Jerry's favorite snack is Hot Cheetos."
query_str = "What is Jerry's favorite snack?"
def augment_doc(doc_str, context, position):
"""Augment doc with additional context at a given position."""
doc_str1 = doc_str[:position]
doc_str2 = doc_str[position:]
return f"{doc_str1}...\n\n{context}\n\n...{doc_str2}"
test_str = augment_doc(
uber_doc.get_content(), context_str, int(0.5 * len(uber_doc.get_content()))
)
async def run_experiments(
doc, position_percentiles, context_str, query, llm, response_mode="compact"
):
eval_llm = OpenAI(model="gpt-4-1106-preview")
correctness_evaluator = CorrectnessEvaluator(llm=eval_llm)
eval_scores = {}
for idx, position_percentile in enumerate(position_percentiles):
print(f"Position percentile: {position_percentile}")
position_idx = int(position_percentile * len(uber_doc.get_content()))
new_doc_str = augment_doc(
uber_doc.get_content(), context_str, position_idx
)
new_doc = Document(text=new_doc_str)
index = SummaryIndex.from_documents(
[new_doc],
)
query_engine = index.as_query_engine(
response_mode=response_mode, llm=llm
)
print(f"Query: {query}")
response = query_engine.query(query)
print(f"Response: {str(response)}")
eval_result = correctness_evaluator.evaluate(
query=query, response=str(response), reference=context_str
)
eval_score = eval_result.score
print(f"Eval score: {eval_score}")
eval_scores[position_percentile] = eval_score
return eval_scores
position_percentiles = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
llm = OpenAI(model="gpt-4-1106-preview")
eval_scores_gpt4 = await run_experiments(
[uber_doc],
position_percentiles,
context_str,
query_str,
llm,
response_mode="compact",
)
llm = OpenAI(model="gpt-4-1106-preview")
eval_scores_gpt4_ts = await run_experiments(
[uber_doc],
position_percentiles,
context_str,
query_str,
llm,
response_mode="tree_summarize",
)
llm = Anthropic(model="claude-2")
eval_scores_anthropic = await run_experiments(
[uber_doc], position_percentiles, context_str, query_str, llm
)
llm = | Anthropic(model="claude-2") | llama_index.llms.anthropic.Anthropic |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-epsilla')
get_ipython().system('pip/pip3 install pyepsilla')
get_ipython().system('pip install llama-index')
import logging
import sys
from llama_index.core import SimpleDirectoryReader, Document, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.epsilla import EpsillaVectorStore
import textwrap
import openai
import getpass
OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
openai.api_key = OPENAI_API_KEY
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(f"Total documents: {len(documents)}")
print(f"First document, id: {documents[0].doc_id}")
print(f"First document, hash: {documents[0].hash}")
from pyepsilla import vectordb
client = vectordb.Client()
vector_store = EpsillaVectorStore(client=client, db_path="/tmp/llamastore")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("Who is the author?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("How did the author learn about AI?")
print(textwrap.fill(str(response), 100))
vector_store = EpsillaVectorStore(client=client, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
single_doc = Document(text="Epsilla is the vector database we are using.")
index = VectorStoreIndex.from_documents(
[single_doc],
storage_context=storage_context,
)
query_engine = index.as_query_engine()
response = query_engine.query("Who is the author?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What vector database is being used?")
print(textwrap.fill(str(response), 100))
vector_store = EpsillaVectorStore(client=client, overwrite=False)
index = | VectorStoreIndex.from_vector_store(vector_store=vector_store) | llama_index.core.VectorStoreIndex.from_vector_store |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().system('pip install llama-index qdrant_client')
import qdrant_client
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
client = qdrant_client.QdrantClient(
location=":memory:"
)
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"author": "Harper Lee",
"theme": "Mafia",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
import os
from llama_index.core import StorageContext
os.environ["OPENAI_API_KEY"] = "sk-..."
vector_store = QdrantVectorStore(
client=client, collection_name="test_collection_1"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.vector_stores import (
MetadataFilter,
MetadataFilters,
FilterOperator,
)
filters = MetadataFilters(
filters=[
| MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia") | llama_index.core.vector_stores.MetadataFilter |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import weaviate
resource_owner_config = weaviate.AuthClientPassword(
username="<username>",
password="<password>",
)
client = weaviate.Client("http://localhost:8080")
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.weaviate import WeaviateVectorStore
from llama_index.core.response.notebook_utils import display_response
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.core import StorageContext
vector_store = | WeaviateVectorStore(weaviate_client=client) | llama_index.vector_stores.weaviate.WeaviateVectorStore |
get_ipython().run_line_magic('pip', 'install llama-index-readers-twitter')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex
from llama_index.readers.twitter import TwitterTweetReader
from IPython.display import Markdown, display
import os
BEARER_TOKEN = "<bearer_token>"
reader = | TwitterTweetReader(BEARER_TOKEN) | llama_index.readers.twitter.TwitterTweetReader |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
MONGO_URI = os.environ["MONGO_URI"]
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.index_store.mongodb import MongoIndexStore
storage_context = StorageContext.from_defaults(
docstore=MongoDocumentStore.from_uri(uri=MONGO_URI),
index_store=MongoIndexStore.from_uri(uri=MONGO_URI),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
nodes, storage_context=storage_context
)
len(storage_context.docstore.docs)
storage_context.persist()
list_id = summary_index.index_id
vector_id = vector_index.index_id
keyword_id = keyword_table_index.index_id
from llama_index.core import load_index_from_storage
storage_context = StorageContext.from_defaults(
docstore=MongoDocumentStore.from_uri(uri=MONGO_URI),
index_store=MongoIndexStore.from_uri(uri=MONGO_URI),
)
summary_index = load_index_from_storage(
storage_context=storage_context, index_id=list_id
)
vector_index = load_index_from_storage(
storage_context=storage_context, vector_id=vector_id
)
keyword_table_index = load_index_from_storage(
storage_context=storage_context, keyword_id=keyword_id
)
chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.llm = chatgpt
Settings.chunk_size = 1024
query_engine = summary_index.as_query_engine()
list_response = query_engine.query("What is a summary of this document?")
| display_response(list_response) | llama_index.core.response.notebook_utils.display_response |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
def multiply(a: int, b: int) -> int:
"""Multiply two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
llm = OpenAI(model="gpt-3.5-turbo-instruct")
agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)
response = agent.chat("What is 20+(2*4)? Calculate step by step ")
response_gen = agent.stream_chat("What is 20+2*4? Calculate step by step")
response_gen.print_response_stream()
llm = OpenAI(model="gpt-4")
agent = | ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True) | llama_index.core.agent.ReActAgent.from_tools |
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
openai.api_key = os.getenv("OPENAI_API_KEY")
from pydantic import BaseModel, Field
from typing import List
class NodeMetadata(BaseModel):
"""Node metadata."""
entities: List[str] = Field(
..., description="Unique entities in this text chunk."
)
summary: str = Field(
..., description="A concise summary of this text chunk."
)
contains_number: bool = Field(
...,
description=(
"Whether the text chunk contains any numbers (ints, floats, etc.)"
),
)
from llama_index.program.openai import OpenAIPydanticProgram
from llama_index.core.extractors import PydanticProgramExtractor
EXTRACT_TEMPLATE_STR = """\
Here is the content of the section:
----------------
{context_str}
----------------
Given the contextual information, extract out a {class_name} object.\
"""
openai_program = OpenAIPydanticProgram.from_defaults(
output_cls=NodeMetadata,
prompt_template_str="{input}",
)
program_extractor = PydanticProgramExtractor(
program=openai_program, input_key="input", show_progress=True
)
from llama_index.readers.web import SimpleWebPageReader
from llama_index.core.node_parser import SentenceSplitter
reader = SimpleWebPageReader(html_to_text=True)
docs = reader.load_data(urls=["https://eugeneyan.com/writing/llm-patterns/"])
from llama_index.core.ingestion import IngestionPipeline
node_parser = SentenceSplitter(chunk_size=1024)
pipeline = | IngestionPipeline(transformations=[node_parser, program_extractor]) | llama_index.core.ingestion.IngestionPipeline |
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.agent import (
CustomSimpleAgentWorker,
Task,
AgentChatResponse,
)
from typing import Dict, Any, List, Tuple, Optional
from llama_index.core.tools import BaseTool, QueryEngineTool
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core import ChatPromptTemplate, PromptTemplate
from llama_index.core.selectors import PydanticSingleSelector
from llama_index.core.bridge.pydantic import Field, BaseModel
from llama_index.core.llms import ChatMessage, MessageRole
DEFAULT_PROMPT_STR = """
Given previous question/response pairs, please determine if an error has occurred in the response, and suggest \
a modified question that will not trigger the error.
Examples of modified questions:
- The question itself is modified to elicit a non-erroneous response
- The question is augmented with context that will help the downstream system better answer the question.
- The question is augmented with examples of negative responses, or other negative questions.
An error means that either an exception has triggered, or the response is completely irrelevant to the question.
Please return the evaluation of the response in the following JSON format.
"""
def get_chat_prompt_template(
system_prompt: str, current_reasoning: Tuple[str, str]
) -> ChatPromptTemplate:
system_msg = ChatMessage(role=MessageRole.SYSTEM, content=system_prompt)
messages = [system_msg]
for raw_msg in current_reasoning:
if raw_msg[0] == "user":
messages.append(
ChatMessage(role=MessageRole.USER, content=raw_msg[1])
)
else:
messages.append(
ChatMessage(role=MessageRole.ASSISTANT, content=raw_msg[1])
)
return ChatPromptTemplate(message_templates=messages)
class ResponseEval(BaseModel):
"""Evaluation of whether the response has an error."""
has_error: bool = Field(
..., description="Whether the response has an error."
)
new_question: str = Field(..., description="The suggested new question.")
explanation: str = Field(
...,
description=(
"The explanation for the error as well as for the new question."
"Can include the direct stack trace as well."
),
)
from llama_index.core.bridge.pydantic import PrivateAttr
class RetryAgentWorker(CustomSimpleAgentWorker):
"""Agent worker that adds a retry layer on top of a router.
Continues iterating until there's no errors / task is done.
"""
prompt_str: str = Field(default=DEFAULT_PROMPT_STR)
max_iterations: int = Field(default=10)
_router_query_engine: RouterQueryEngine = | PrivateAttr() | llama_index.core.bridge.pydantic.PrivateAttr |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().system('pip install llama-index')
import os
import pinecone
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="eu-west1-gcp")
indexes = pinecone.list_indexes()
print(indexes)
if "quickstart-index" not in indexes:
pinecone.create_index(
"quickstart-index", dimension=1536, metric="euclidean", pod_type="p1"
)
pinecone_index = pinecone.Index("quickstart-index")
pinecone_index.delete(deleteAll="true")
books = [
{
"title": "To Kill a Mockingbird",
"author": "Harper Lee",
"content": (
"To Kill a Mockingbird is a novel by Harper Lee published in"
" 1960..."
),
"year": 1960,
},
{
"title": "1984",
"author": "George Orwell",
"content": (
"1984 is a dystopian novel by George Orwell published in 1949..."
),
"year": 1949,
},
{
"title": "The Great Gatsby",
"author": "F. Scott Fitzgerald",
"content": (
"The Great Gatsby is a novel by F. Scott Fitzgerald published in"
" 1925..."
),
"year": 1925,
},
{
"title": "Pride and Prejudice",
"author": "Jane Austen",
"content": (
"Pride and Prejudice is a novel by Jane Austen published in"
" 1813..."
),
"year": 1813,
},
]
import uuid
from llama_index.embeddings.openai import OpenAIEmbedding
embed_model = OpenAIEmbedding()
entries = []
for book in books:
vector = embed_model.get_text_embedding(book["content"])
entries.append(
{"id": str(uuid.uuid4()), "values": vector, "metadata": book}
)
pinecone_index.upsert(entries)
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core import VectorStoreIndex
from llama_index.core.response.pprint_utils import pprint_source_node
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index, text_key="content"
)
retriever = VectorStoreIndex.from_vector_store(vector_store).as_retriever(
similarity_top_k=1
)
nodes = retriever.retrieve("What is that book about a bird again?")
| pprint_source_node(nodes[0]) | llama_index.core.response.pprint_utils.pprint_source_node |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west1-gcp")
pinecone.create_index(
"quickstart", dimension=1536, metric="euclidean", pod_type="p1"
)
pinecone_index = pinecone.Index("quickstart")
pinecone_index.delete(deleteAll=True)
from llama_index.vector_stores.pinecone import PineconeVectorStore
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import StorageContext
splitter = SentenceSplitter(chunk_size=1024)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, transformations=[splitter], storage_context=storage_context
)
query_str = "Can you tell me about the key concepts for safety finetuning"
from llama_index.embeddings.openai import OpenAIEmbedding
embed_model = OpenAIEmbedding()
query_embedding = embed_model.get_query_embedding(query_str)
from llama_index.core.vector_stores import VectorStoreQuery
query_mode = "default"
vector_store_query = VectorStoreQuery(
query_embedding=query_embedding, similarity_top_k=2, mode=query_mode
)
query_result = vector_store.query(vector_store_query)
query_result
from llama_index.core.schema import NodeWithScore
from typing import Optional
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[index]
nodes_with_scores.append( | NodeWithScore(node=node, score=score) | llama_index.core.schema.NodeWithScore |
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
get_ipython().system('pip install llama-index')
from llama_index.llms.anthropic import Anthropic
from llama_index.core import Settings
tokenizer = Anthropic().tokenizer
Settings.tokenizer = tokenizer
import os
os.environ["ANTHROPIC_API_KEY"] = "YOUR ANTHROPIC API KEY"
from llama_index.llms.anthropic import Anthropic
llm = | Anthropic(model="claude-3-opus-20240229") | llama_index.llms.anthropic.Anthropic |
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import (
PrevNextNodePostprocessor,
AutoPrevNextNodePostprocessor,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage.docstore import SimpleDocumentStore
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import StorageContext
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.core import Settings
Settings.chunk_size = 512
nodes = | Settings.node_parser.get_nodes_from_documents(documents) | llama_index.core.Settings.node_parser.get_nodes_from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-typesense')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from IPython.display import Markdown, display
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.vector_stores.typesense import TypesenseVectorStore
from typesense import Client
typesense_client = Client(
{
"api_key": "xyz",
"nodes": [{"host": "localhost", "port": "8108", "protocol": "http"}],
"connection_timeout_seconds": 2,
}
)
typesense_vector_store = | TypesenseVectorStore(typesense_client) | llama_index.vector_stores.typesense.TypesenseVectorStore |
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.program.openai import OpenAIPydanticProgram
from pydantic import BaseModel
from llama_index.llms.openai import OpenAI
from llama_index.finetuning.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from typing import List
class Song(BaseModel):
"""Data model for a song."""
title: str
length_seconds: int
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
songs: List[Song]
finetuning_handler = | OpenAIFineTuningHandler() | llama_index.finetuning.callbacks.OpenAIFineTuningHandler |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "./llama2.pdf"')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/1706.03762.pdf" -O "./attention.pdf"')
from llama_index.core import download_loader
from llama_index.readers.file import PyMuPDFReader
llama2_docs = PyMuPDFReader().load_data(
file_path="./llama2.pdf", metadata=True
)
attention_docs = PyMuPDFReader().load_data(
file_path="./attention.pdf", metadata=True
)
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.core.node_parser import TokenTextSplitter
nodes = TokenTextSplitter(
chunk_size=1024, chunk_overlap=128
).get_nodes_from_documents(llama2_docs + attention_docs)
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore
docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
client = QdrantClient(path="./qdrant_data")
vector_store = QdrantVectorStore("composable", client=client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes=nodes)
vector_retriever = index.as_retriever(similarity_top_k=2)
bm25_retriever = BM25Retriever.from_defaults(
docstore=docstore, similarity_top_k=2
)
from llama_index.core.schema import IndexNode
vector_obj = IndexNode(
index_id="vector", obj=vector_retriever, text="Vector Retriever"
)
bm25_obj = IndexNode(
index_id="bm25", obj=bm25_retriever, text="BM25 Retriever"
)
from llama_index.core import SummaryIndex
summary_index = | SummaryIndex(objects=[vector_obj, bm25_obj]) | llama_index.core.SummaryIndex |
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-agents-llm-compiler-step')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
import nest_asyncio
nest_asyncio.apply()
from llama_index.packs.agents.llm_compiler.step import LLMCompilerAgentWorker
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"LLMCompilerAgentPack",
"./agent_pack",
skip_load=True,
)
from agent_pack.step import LLMCompilerAgentWorker
import json
from typing import Sequence, List
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
import nest_asyncio
nest_asyncio.apply()
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
tools = [multiply_tool, add_tool]
multiply_tool.metadata.fn_schema_str
from llama_index.core.agent import AgentRunner
llm = OpenAI(model="gpt-4")
callback_manager = llm.callback_manager
agent_worker = LLMCompilerAgentWorker.from_tools(
tools, llm=llm, verbose=True, callback_manager=callback_manager
)
agent = AgentRunner(agent_worker, callback_manager=callback_manager)
response = agent.chat("What is (121 * 3) + 42?")
response
agent.memory.get_all()
get_ipython().system('pip install llama-index-readers-wikipedia')
from llama_index.readers.wikipedia import WikipediaReader
wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Miami"]
city_docs = {}
reader = WikipediaReader()
for wiki_title in wiki_titles:
docs = reader.load_data(pages=[wiki_title])
city_docs[wiki_title] = docs
from llama_index.core import ServiceContext
from llama_index.llms.openai import OpenAI
from llama_index.core.callbacks import CallbackManager
llm = OpenAI(temperature=0, model="gpt-4")
service_context = | ServiceContext.from_defaults(llm=llm) | llama_index.core.ServiceContext.from_defaults |
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix
from typing import Iterable
from random import randrange
LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = f"llama-index-colab"
SESSION_CORPUS_ID_PREFIX = (
f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}"
)
def corpus_id(num_id: int) -> str:
return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}"
SESSION_CORPUS_ID = corpus_id(1)
def list_corpora() -> Iterable[genaix.Corpus]:
client = genaix.build_semantic_retriever()
yield from genaix.list_corpora(client=client)
def delete_corpus(*, corpus_id: str) -> None:
client = genaix.build_semantic_retriever()
genaix.delete_corpus(corpus_id=corpus_id, client=client)
def cleanup_colab_corpora():
for corpus in list_corpora():
if corpus.corpus_id.startswith(LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX):
try:
delete_corpus(corpus_id=corpus.corpus_id)
print(f"Deleted corpus {corpus.corpus_id}.")
except Exception:
pass
cleanup_colab_corpora()
from llama_index.core import SimpleDirectoryReader
from llama_index.indices.managed.google import GoogleIndex
from llama_index.core import Response
import time
index = GoogleIndex.create_corpus(
corpus_id=SESSION_CORPUS_ID, display_name="My first corpus!"
)
print(f"Newly created corpus ID is {index.corpus_id}.")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index.insert_documents(documents)
for corpus in list_corpora():
print(corpus)
query_engine = index.as_query_engine()
response = query_engine.query("What did Paul Graham do growing up?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
for cited_text in [node.text for node in response.source_nodes]:
print(f"Cited text: {cited_text}")
if response.metadata:
print(
f"Answerability: {response.metadata.get('answerable_probability', 0)}"
)
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
query_engine = index.as_query_engine()
response = query_engine.query("Which company did Paul Graham build?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
index.insert_nodes(
[
TextNode(
text="It was the best of times.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="123",
metadata={"file_name": "Tale of Two Cities"},
)
},
),
TextNode(
text="It was the worst of times.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="123",
metadata={"file_name": "Tale of Two Cities"},
)
},
),
TextNode(
text="Bugs Bunny: Wassup doc?",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="456",
metadata={"file_name": "Bugs Bunny Adventure"},
)
},
),
]
)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
HarmCategory,
SafetySetting,
)
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
query_engine = index.as_query_engine(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
SafetySetting(
category=HarmCategory.HARM_CATEGORY_VIOLENCE,
threshold=SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH,
),
],
)
response = query_engine.query("What was Bugs Bunny's favorite saying?")
print(response)
from llama_index.core import Response
response = query_engine.query("What were Paul Graham's achievements?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
for cited_text in [node.text for node in response.source_nodes]:
print(f"Cited text: {cited_text}")
if response.metadata:
print(
f"Answerability: {response.metadata.get('answerable_probability', 0)}"
)
from llama_index.llms.gemini import Gemini
GEMINI_API_KEY = "" # @param {type:"string"}
gemini = Gemini(api_key=GEMINI_API_KEY)
from llama_index.response_synthesizers.google import GoogleTextSynthesizer
from llama_index.vector_stores.google import GoogleVectorStore
from llama_index.core import VectorStoreIndex
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
reranker = LLMRerank(
top_n=10,
llm=gemini,
)
query_engine = RetrieverQueryEngine.from_args(
retriever=VectorIndexRetriever(
index=index,
similarity_top_k=20,
),
node_postprocessors=[reranker],
response_synthesizer=response_synthesizer,
)
response = query_engine.query("What were Paul Graham's achievements?")
print(response)
from llama_index.core.indices.query.query_transform.base import (
StepDecomposeQueryTransform,
)
from llama_index.core.query_engine import MultiStepQueryEngine
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
single_step_query_engine = index.as_query_engine(
similarity_top_k=10,
response_synthesizer=response_synthesizer,
)
step_decompose_transform = StepDecomposeQueryTransform(
llm=gemini,
verbose=True,
)
query_engine = MultiStepQueryEngine(
query_engine=single_step_query_engine,
query_transform=step_decompose_transform,
response_synthesizer=response_synthesizer,
index_summary="Ask me anything.",
num_steps=6,
)
response = query_engine.query("What were Paul Graham's achievements?")
print(response)
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.core.query_engine import TransformQueryEngine
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
base_query_engine = index.as_query_engine(
similarity_top_k=10,
response_synthesizer=response_synthesizer,
)
hyde = HyDEQueryTransform(
llm=gemini,
include_original=False,
)
hyde_query_engine = | TransformQueryEngine(base_query_engine, hyde) | llama_index.core.query_engine.TransformQueryEngine |
End of preview. Expand
in Dataset Viewer.
README.md exists but content is empty.
Use the Edit dataset card button to edit it.
- Downloads last month
- 46