class ChatHistory()A class to store the chat history (constant messages) for a chat flow/session.
def __init__(messages: List[Message] = None) -> NoneInitializes the ChatHistory class with the given parameters.
Arguments:
messages: The messages to initialize the chat history with.
@property
def messages() -> List[Message]Gets the messages in the chat history.
Returns:
The messages in the chat history.
def add_message(message: Message) -> NoneAdds a message (made constant) to the chat history.
Arguments:
message: The message to add to the chat history.
def make_const() -> NoneMakes this chat history constant so messages cannot be added.
def get_const() -> AnyCreates a deepcopy of self and makes it constant.
Returns:
A deepcopy of this chat history made constant so messages cannot be added
class ChatHistoryManager()A class to manage chat histories for multiple chat flows.
def __init__() -> NoneInitializes the ChatHistoryManager class.
def get_chat_history(chat_id: str) -> ChatHistoryArguments:
chat_id: The chat ID to get the chat history for.
Returns:
The chat history for the given chat ID.
def add_chat_history(chat_id: str,
chat_history: Optional[ChatHistory] = None) -> NoneArguments:
chat_id: The chat ID to add the chat history for. chat_history: The chat history to add for the given chat ID. If not provided, a placeholder (None) is added.
def replace_chat_history(chat_id: str, chat_history: ChatHistory) -> NoneArguments:
chat_id: The chat ID to replace the chat history for. chat_history: The chat history to replace for the given chat ID.
def delete_chat_history(chat_id: str) -> NoneArguments:
chat_id: The chat ID to delete the chat history for.
def get_chat_histories() -> Dict[str, ChatHistory]Returns:
The chat histories for all chat IDs.
def clear_chat_histories() -> NoneClears all chat histories.
def get_combined_chat_histories(chat_ids: List[str]) -> ChatHistoryArguments:
chat_ids: The chat IDs to get the combined chat history for. (Order matters)
Returns:
The combined chat history for the given chat IDs.
class ChatLLM()A class for chat completion using the GPT model.
def __init__(gpt_model: str = "gpt-3.5-turbo", stream=False, **kwargs) -> NoneInitializes the ChatLLM class with the given parameters.
Arguments:
gpt_model: GPT model to use for chat completion. stream: Whether to use stream mode.
def __call__(messages: List[Message]) -> Tuple[str, str, Dict]Generates a response using the GPT model based on the input messages.
Arguments:
messages: List of messages to use for chat completion.
Returns:
Response from the chat completion with content, role, and metadata.
class Chunker()def __init__(encoder, chunk_size: int, overlap_factor: float) -> NoneArguments:
encoder: The text encoder object. chunk_size: The desired chunk size in token units. overlap_factor: The factor to calculate the overlap between chunks.
def chunk(text: str) -> List[str]Chunks text into chunks of size chunk_size (token units) with overlap.
Arguments:
text: The input text to be chunked.
Returns:
A list of chunked text.
class SmartChunker(Chunker)def __init__(encoder,
chunk_size: int,
overlap_factor: float,
delimiters_tolerances_overlap: Optional[List[Tuple[str, float,
bool]]] = None,
prefer_large_chunks: bool = True) -> NoneArguments:
encoder: The text encoder object. chunk_size: The desired chunk size in token units. overlap_factor: The factor to calculate the overlap between chunks. delimiters_tolerances_overlap: A list of tuples with delimiter, tolerance, and overlap values for smart chunking. Defaults to None. prefer_large_chunks: If True, prefer chunks splitting at the last occurrence of a delimiter.
def chunk(text: str) -> List[str]Chunks text respecting delimiters, tolerances, and overlap into chunk_size with overlap.
Arguments:
text: The input text to be chunked.
Returns:
A list of chunked text.
def combine_chat_histories(chat_histories)Combines a list of chat histories into one chat history.
Arguments:
chat_histories: List of chat histories to combine.
Returns:
Combined chat history.
class BaseFlow(ABC)An abstract class for all flows with inputs and one output at the end.
class ChatFlow(BaseFlow)A class for a chat flow with inputs and outputs at any point (except the first and last message).
Limitations:
- Variable checks are done on flow call, not on initialization.
def __init__(messages: List[Message],
default_chat_llm: Optional[ChatLLM] = None,
default_input_chat_history: Optional[ChatHistory] = None,
verbose: bool = False) -> NoneInitializes the ChatFlow class with the given parameters.
Arguments:
messages: List of messages in the chat flow. default_chat_llm: Optional default chat llm used in flow, if not provided in flow call. default_input_chat_history: Optional default input chat history used in flow, if not provided in flow call. verbose: Whether to print verbose output.
@property
def verbose()Returns:
Whether the flow is verbose.
@verbose.setter
def verbose(verbose: bool)Sets the verbose attribute.
Arguments:
verbose: Whether the flow is verbose.
@property
def input_varnames()Returns:
A deepcopy of input variable names.
@property
def output_varnames()Returns:
A deepcopy of output variable names.
def flow(
input_variables: dict,
chat_llm: Optional[ChatLLM] = None,
input_chat_history: Optional[ChatHistory] = None
) -> Tuple[Dict[str, str], Tuple[List[ChatHistory], List[ChatHistory]]]Runs the chat flow through an LLM.
Arguments:
input_variables: Dictionary of input variables.chat_llm: Optional chat language model to use for the chat flow.input_chat_history: Optional input chat history.
Returns:
Tuple of dictionary of output variables and a tuple of input and internal chat histories.
def __call__(input_variables: dict,
chat_llm: Optional[ChatLLM] = None,
input_chat_history: Optional[ChatHistory] = None,
return_all: bool = True) -> Tuple[Dict[str, str], ChatHistory]Runs the chat flow through an LLM.
Arguments:
input_variables: Dictionary of input variables.chat_llm: Optional chat language model to use for the chat flow.input_chat_history: Optional input chat history.return_all: If True, return all variables. If False, return only output variables.
Returns:
Tuple of dictionary of output variables and chat history.
def compress_histories(
histories: Tuple[List[ChatHistory], List[ChatHistory]]
) -> Tuple[ChatHistory, ChatHistory]Combines a tuple of list of chat histories into a tuple of two chat histories.
Arguments:
histories: Tuple of list of chat histories.
Returns:
Tuple of combined input and internal chat histories.
@staticmethod
def from_dicts(messages: List[Dict], **kwargs) -> NoneCreates a ChatFlow from a list of dictionaries of messages with metadata.
Arguments:
messages: List of dictionaries of messages {role: content_format, type: input/output} in the chat flow.
Returns:
ChatFlow object with the messages.
class FuncChatFlow(ChatFlow)A class for creating chat flows from functions.
def __init__(func: Callable[[dict, Optional[ChatLLM], Optional[ChatHistory]],
Tuple[Dict[str, str], Tuple[List[ChatHistory],
List[ChatHistory]]], ],
input_varnames: Set[str],
output_varnames: Set[str],
default_chat_llm: Optional[ChatLLM] = None,
default_input_chat_history: Optional[ChatHistory] = None,
verbose: bool = False) -> NoneInitializes a FuncChatFlow.
Arguments:
func: Function to use for the chat flow. input_varnames: List of input variable names. output_varnames: List of output variable names. default_chat_llm: Optional default chat language model used in flow, if not provided in flow call. default_input_chat_history: Optional default input chat history used in flow, if not provided in flow call. verbose: If True, print chat flow steps.
@property
def input_varnames()Returns:
A deepcopy of input variable names.
@property
def output_varnames()Returns:
A deepcopy of output variable names.
def flow(
input_variables: dict,
chat_llm: Optional[ChatLLM] = None,
input_chat_history: Optional[ChatHistory] = None
) -> Tuple[Dict[str, str], Tuple[List[ChatHistory], List[ChatHistory]]]Runs the chat flow through an LLM.
Arguments:
input_variables: Dictionary of input variables.chat_llm: Optional chat language model to use for the chat flow.input_chat_history: Optional input chat history.
Returns:
Tuple of dictionary of output variables and a tuple of input and internal chat histories.
class ChatFlowWrapper(ChatFlow)A ChatFlow wrapper class for others to inherit from.
def __init__(chat_flow: ChatFlow, verbose: bool = False) -> NoneInitializes a ChatFlowWrapper.
Arguments:
chat_flow: ChatFlow to wrap.verbose: Whether to print verbose output.
@property
def verbose()Returns:
Whether the flow is verbose.
@verbose.setter
def verbose(verbose: bool)Sets the verbose attribute.
Arguments:
verbose: Whether the flow is verbose.
@property
def input_varnames()Returns:
A deepcopy of input variable names.
@property
def output_varnames()Returns:
A deepcopy of output variable names.
def flow(
input_variables: dict,
chat_llm: Optional[ChatLLM] = None,
input_chat_history: Optional[ChatHistory] = None
) -> Tuple[Dict[str, str], Tuple[List[ChatHistory], List[ChatHistory]]]Runs the chat flow through an LLM.
Arguments:
input_variables: Dictionary of input variables.chat_llm: Optional chat language model to use for the chat flow.input_chat_history: Optional input chat history.
Returns:
Tuple of dictionary of output variables and a tuple of empty input and internal chat histories.
class NoHistory(ChatFlowWrapper)A ChatFlow that blocks the input chat history from being passed to the LLM and returns empty input and internal chat histories.
def __init__(chat_flow: ChatFlow,
allow_input_history: bool = False,
allow_rtn_internal_history: bool = False,
allow_rtn_input_history: bool = False,
disallow_default_history: bool = False,
verbose: bool = False) -> NoneInitializes a NoHistory object.
Arguments:
chat_flow: ChatFlow to wrap. allow_input_history: Whether to allow the input chat history to be passed to the LLM. allow_rtn_internal_history: Whether to allow the internal chat history to be returned. allow_rtn_input_history: Whether to allow the input chat history to be returned. disallow_default_history: Whether to disallow the default chat history to be returned. verbose: Whether to print verbose output.
def flow(
input_variables: dict,
chat_llm: Optional[ChatLLM] = None,
input_chat_history: Optional[ChatHistory] = None
) -> Tuple[Dict[str, str], Tuple[List[ChatHistory], List[ChatHistory]]]Runs the chat flow through an LLM.
Arguments:
input_variables: Dictionary of input variables.chat_llm: Optional chat language model to use for the chat flow.input_chat_history: Optional input chat history. Will not be used, but internal chat flow may use default.
Returns:
Tuple of dictionary of output variables and a tuple of empty input and internal chat histories.
class History(ChatFlowWrapper)A class that wraps a ChatFlow and uses a history manager to import and export histories to other History Chat Flows.
Limitations:
- If importing histories, the input chat histories will be ignored.
def __init__(chat_flow: ChatFlow,
history_manager: ChatHistoryManager,
histories_id: Optional[str],
histories_ids: Optional[List[str]] = None,
verbose: bool = False) -> NoneInitializes a History object.
Arguments:
chat_flow: ChatFlow to wrap. history_manager: Chat history manager to use. histories_id: Optional ID of the history to use. If provided, this chat flow's input and internal histories will be saved to the history manager. histories_ids: Optional list of IDs of histories to combine and use. If provided, input chat histories will be ignored.
def flow(
input_variables: dict,
chat_llm: Optional[ChatLLM] = None,
input_chat_history: Optional[ChatHistory] = None
) -> Tuple[Dict[str, str], Tuple[List[ChatHistory], List[ChatHistory]]]Runs the chat flow through an LLM.
Arguments:
input_variables: Dictionary of input variables.chat_llm: Optional chat language model to use for the chat flow.input_chat_history: Optional input chat history.
Returns:
Tuple of dictionary of output variables and a tuple of empty input and internal chat histories.
class MemoryChatFlow(ChatFlowWrapper)A class for creating chat flows that interact with external memories
def __init__(chat_flow: ChatFlow,
memory: Memory,
memory_query_kwargs: Optional[dict] = None,
default_chat_llm: Optional[ChatLLM] = None,
default_input_chat_history: Optional[ChatHistory] = None,
verbose: bool = False) -> NoneInitializes a MemoryChatFlow from a ChatFlow.
Arguments:
chat_flow: ChatFlow used for the chat flow and to get the query. memory: Memory to use for the chat flow. memory_query_kwargs: Optional keyword arguments to pass to memory query. default_chat_llm: Optional default chat language model used in flow, if not provided in flow call. default_input_chat_history: Optional default input chat history used in flow, if not provided in flow call. verbose: If True, print chat flow steps.
@property
def input_varnames()Returns:
A deepcopy of input variable names.
@property
def output_varnames()Returns:
A deepcopy of output variable names.
def flow(
input_variables: dict,
chat_llm: Optional[ChatLLM] = None,
input_chat_history: Optional[ChatHistory] = None
) -> Tuple[Dict[str, str], Tuple[List[ChatHistory], List[ChatHistory]]]Runs the chat flow through an LLM and gets a query which is used to get memory from external memories.
Arguments:
input_variables: Dictionary of input variables.chat_llm: Optional chat language model to use for the chat flow.input_chat_history: Optional input chat history.
Returns:
Tuple of dictionary of output variables and a tuple of input and internal chat histories.
class ConditonalChatFlow(ChatFlowWrapper)A class for creating conditional chat flows, which shift flows based on the output of previous messages.
def __init__(decision_chat_flow: ChatFlow,
branch_chat_flows: Dict[str, ChatFlow],
default_chat_llm: Optional[ChatLLM] = None,
default_input_chat_history: Optional[ChatHistory] = None,
verbose: bool = False)Initializes a ConditonalChatFlow.
Arguments:
decision_chat_flow: Chat flow for making the decision. branch_chat_flows: Dictionary of chat flows for each branch. Use `default` as the key for the default branch. default_chat_llm: Optional default chat language model used in flow, if not provided in flow call. default_input_chat_history: Optional default input chat history used in flow, if not provided in flow call. verbose: If True, print chat flow messages.
@property
def verbose()Returns:
Whether the flow is verbose.
@verbose.setter
def verbose(verbose: bool)Sets the verbose attribute.
Arguments:
verbose: Whether the flow is verbose.
def flow(
input_variables: dict,
chat_llm: Optional[ChatLLM] = None,
input_chat_history: Optional[ChatHistory] = None
) -> Tuple[Dict[str, str], Tuple[List[ChatHistory], List[ChatHistory]]]Runs the decision chat flow through an LLM and then from the decision the appropriate branch.
Arguments:
input_variables: Dictionary of input variables.chat_llm: Optional chat language model to use for the chat flow.input_chat_history: Optional input chat history.
Returns:
Tuple of dictionary of output variables and a tuple of input and internal chat histories.
def compress_histories(
histories: Tuple[List[ChatHistory], List[ChatHistory]]
) -> Tuple[ChatHistory, ChatHistory]Combines a tuple of list of chat histories into a tuple of two chat histories.
Arguments:
histories: Tuple of list of chat histories.
Returns:
Tuple of combined input and internal chat histories.
class SequentialChatFlows(ChatFlowWrapper)A sequential chat flow class that runs a list of chat flows sequentially.
Limitations:
- All chat flows use the input history returned by the first chat flow plus internal of previous chat flows.
- A chat flow can take an input and overwrite the original input with a new output with the same name. Be careful.
def __init__(chat_flows: List[ChatFlow],
default_chat_llm: Optional[ChatLLM] = None,
default_input_chat_history: Optional[ChatHistory] = None,
verbose: bool = False) -> NoneInitializes a sequential chat flows class.
Arguments:
chat_flows: List of chat flows to run sequentially.default_chat_llm: Optional default chat language model used in flow, if not provided in flow call.default_input_chat_history: Optional default input chat history used in flow, if not provided in flow call.verbose: If True, print chat flow messages.
@property
def verbose()Returns:
Whether the flow is verbose.
@verbose.setter
def verbose(verbose: bool)Sets the verbose attribute.
Arguments:
verbose: Whether the flow is verbose.
@property
def input_varnames()Returns:
A deepcopy of input variable names.
@property
def output_varnames()Returns:
A deepcopy of output variable names.
def flow(
input_variables: dict,
chat_llm: Optional[ChatLLM] = None,
input_chat_history: Optional[ChatHistory] = None
) -> Tuple[Dict[str, str], Tuple[List[ChatHistory], List[ChatHistory]]]Runs the chat flows through an LLM.
Arguments:
input_variables: Dictionary of input variables.chat_llm: Optional chat language model to use for the chat flow.input_chat_history: Optional input chat history.
Returns:
Tuple of dictionary of output variables and list of chat histories.
class ConcurrentChatFlows(ChatFlowWrapper)def __init__(chat_flows: List[ChatFlow],
default_chat_llm: Optional[ChatLLM] = None,
default_input_chat_history: Optional[ChatHistory] = None,
max_workers=None,
verbose: bool = False) -> NoneInitializes a concurrent chat flows class.
Arguments:
chat_flows: List of chat flows to run concurrently. default_chat_llm: Optional default chat language model used in flow, if not provided in flow call. default_input_chat_history: Optional default input chat history used in flow, if not provided in flow call. max_workers: Number of threads to use for concurrent chat flows. If None, use all available threads. verbose: If True, print chat flow messages.
@property
def verbose()Returns:
Whether the flow is verbose.
@verbose.setter
def verbose(verbose: bool)Sets the verbose attribute.
Arguments:
verbose: Whether the flow is verbose.
@property
def input_varnames()Returns:
A deepcopy of input variable names.
@property
def output_varnames()Returns:
A deepcopy of output variable names.
def flow(
input_variables: dict,
chat_llm: Optional[ChatLLM] = None,
input_chat_history: Optional[ChatHistory] = None
) -> Tuple[Dict[str, str], Tuple[List[ChatHistory], List[ChatHistory]]]Runs the chat flows concurrently through an LLM.
Arguments:
input_variables: Dictionary of input variables.chat_llm: Optional chat language model to use for the chat flow.input_chat_history: Optional input chat history.
Returns:
Tuple of dictionary of output variables and tuple of list of chat histories (order matches ordering of chat_flows).
def compress_histories(
histories: Tuple[List[ChatHistory], List[ChatHistory]]
) -> Tuple[ChatHistory, ChatHistory]Combines a tuple of list of chat histories into a tuple of two chat histories.
Arguments:
histories: Tuple of list of chat histories.
Returns:
Tuple of combined input and internal chat histories.
class ChatSpiral(ChatFlowWrapper)def __init__(chat_flow: ChatFlow,
output_varnames_remap: Optional[Dict[str, str]] = None,
default_chat_llm: Optional[ChatLLM] = None,
default_input_chat_history: Optional[ChatHistory] = None,
verbose: bool = False) -> NoneInitializes a chat spiral class.
Arguments:
chat_flow: Chat flow to spiral. output_varnames_remap: Optional dictionary of output variable names to remap. default_chat_llm: Optional default chat language model used in flow, if not provided in flow/spiral call. default_input_chat_history: Optional default input chat history used in flow, if not provided in flow/spiral call. verbose: Whether to print verbose output.
def flow(
input_variables: dict,
chat_llm: Optional[ChatLLM] = None,
input_chat_history: Optional[ChatHistory] = None
) -> Tuple[Dict[str, str], Tuple[List[ChatHistory], List[ChatHistory]]]Runs the chat flow through an LLM.
Arguments:
input_variables: Dictionary of input variables.chat_llm: Optional chat language model to use for the chat flow.input_chat_history: Optional input chat history.
Returns:
Tuple of dictionary of output variables and two tuple of list of chat histories.
def spiral(
input_variables: dict,
reset_history: bool = False,
chat_llm: Optional[ChatLLM] = None,
input_chat_history: Optional[ChatHistory] = None,
max_iterations: Optional[int] = None
) -> Tuple[Dict[str, str], ChatHistory]Runs the chat flow through an LLM continuously.
Arguments:
input_variables: Dictionary of input variables.reset_history: Whether to reset the chat history after each chat flow completion.chat_llm: Optional chat language model to use for the chat flow.input_chat_history: Optional input chat history.max_iterations: Maximum number of iterations to run through the chat flow.
Returns:
Tuple of dictionary of output variables and chat history
def __call__(input_variables: dict,
reset_history: bool = False,
chat_llm: Optional[ChatLLM] = None,
input_chat_history: Optional[ChatHistory] = None,
max_iterations: Optional[int] = None,
return_all: bool = True) -> Tuple[Dict[str, str], ChatHistory]Runs the chat flow through an LLM continuously.
Arguments:
input_variables: Dictionary of input variables.reset_history: Whether to reset the chat history after each chat flow completion.chat_llm: Optional chat language model to use for the chat flow.input_chat_history: Optional input chat history.max_iterations: Maximum number of iterations to run through the chat flow.return_all: Whether to return all output variables.
Returns:
Tuple of dictionary of output variables and chat history
def compress_histories(
histories: Tuple[List[ChatHistory], List[ChatHistory]]
) -> Tuple[ChatHistory, ChatHistory]Combines a tuple of list of chat histories into a tuple of two chat histories.
Arguments:
histories: Tuple of list of chat histories.
Returns:
Tuple of combined input and internal chat histories.
class Loader(ABC)def __init__(chunker=None)Arguments:
chunker: The chunker to split text into chunks (default: None).
def handle_chunking(
loaded_items: List[Dict[str, Any]]) -> List[Dict[str, Any]]Arguments:
loaded_items: List of dictionaries containing the path to the file and its content with other possible metadata.
class TextLoader(Loader)def __init__(encoding: str = "utf-8", chunker=None)Arguments:
encoding: The encoding of the text file (default: 'utf-8').chunker: The chunker to split text into chunks (default: None).
def load(filepath: str) -> List[Dict[str, Any]]Arguments:
filepath: Path to the text file to be loaded.
Returns:
Dictionary containing the path to the file and its content.
class PDFLoader(Loader)PDFLoader that uses fitz (PyMuPDF) to load PDF files.
def load(filepath: str) -> List[Dict[str, Any]]Arguments:
filepath: Path to the PDF file to be loaded.
Returns:
List of dictionaries containing the path to the file, its content, and the page number.
class HTMLLoader(Loader)HTMLLoader that uses beautiful soup to load HTML files.
def load(filepath: str) -> List[Dict[str, Any]]Arguments:
filepath: Path to the HTML file to be loaded.
Returns:
Dictionary containing the path to the file, its content, and the soup title.
class DirectoryLoader(Loader)def __init__(path: str,
loader: Loader,
should_recurse: bool = True,
filter_regex: Optional[str] = None)Arguments:
path: Path to the directory to load files from.loader: Class that actually loads the specific file.should_recurse: Whether to recursively load files from subdirectories (default: True).filter_regex: Regular expression to filter files by (default: None).
def load(current_path: Optional[str] = None) -> List[Dict[str, Any]]Arguments:
current_path: Current path while recursively loading directories (default: None).
Returns:
List of dictionaries containing the path to the file, its content, and possibly other metadata.
class DirectoryMultiLoader(Loader)def __init__(path: str,
loader: Dict[str, Loader],
should_recurse: bool = True)Arguments:
path: Path to the directory to load files from.loader: Dictionary of loader instances, with keys being filter regexes.should_recurse: Whether to recursively load files from subdirectories (default: True).
def load(current_path: Optional[str] = None) -> List[Dict[str, Any]]Arguments:
current_path: Current path while recursively loading directories (default: None).
def combine_on_overlap(str1: str, str2: str,
threshold: float) -> Optional[str]Combines two strings if they overlap by a certain threshold.
Arguments:
str1: First string to combine.str2: Second string to combine.threshold: Threshold for ratio of overlap to combine results from multiple queries.
Returns:
Combined string if they overlap by a certain threshold, otherwise None.
class Memory()def __init__(filepath: Optional[str] = None,
embedding_model: str = "text-embedding-ada-002") -> NoneInitializes the memory.
Arguments:
filepath: Path to a pickle file to load and save the memory to. If None, the memory is created with text and metadata fields.embedding_model: Model to use for the embedding.
def save(filepath: Optional[str] = None) -> NoneSaves the memory to a file.
Arguments:
filepath: Path to the pickle file to save the memory to. If None, the filepath passed in the constructor is used.
def load(filepath: Optional[str] = None) -> NoneLoads the memory from a pickle file.
Arguments:
filepath: Path to a pickle file to load the memory from. If None, the filepath passed in the constructor is used.
def add(data: Dict[str, str],
save: bool = False,
filepath: Optional[str] = None) -> NoneAdds data to memory.
Arguments:
data: Dict of data with a text and metadata field to add to memory.save: Whether to save the memory to a file.filepath: Path to the file (csv or parquet) to save the memory to. If None, the filepath passed in the constructor is used.
def query(query: str,
k: int = 1,
combine_threshold: Optional[float] = None) -> list[Dict[str, str]]Queries the memory with the given query.
Arguments:
query: Query to use to get memory.k: Max number of results to return.combine_threshold: Threshold for ratio of overlap to combine results from multiple queries. If None, no combining is done.
Returns:
Memory obtained from external memories with metadata and scores (cosine similarity).
def extract_fstring_variables(text: str) -> List[str]Extracts variables from a f-string like text.
Arguments:
text: f-string like text to extract variables from.
def flatten_dict(d: dict, parent_key: str = "", sep: str = ".") -> dictFlatten a dictionary.
Arguments:
d: Dictionary to flatten.parent_key: Parent key to use.sep: Separator to use.
Returns:
Flattened dictionary.
class ExtractionError(Exception)A class to represent an error in extracting a variable from a message.
class Role()A class to represent the role of a message. Using OpenAI roles.
class Message(ABC)A class to represent a message.
def __init__(content_format: str, role: Optional[str] = None) -> NoneInitializes the Message class with the given parameters.
Arguments:
content_format: A f-string format for the message content.role: Role associated with the message (default is None).
@content_format.setter
def content_format(content_format: str)Arguments:
content_format: A f-string like format for the message content.
@role.setter
def role(role: str)Arguments:
role: Role associated with the message.
def defined() -> boolDetermines if all variables have a value, essentially if the message has been called or has no variables.
Returns:
True if all variables have a value, False otherwise.
def make_const() -> NoneMakes this message constant so variables and content format cannot change.
def get_const() -> AnyCreates a deepcopy of self and makes it constant.
Returns:
A deepcopy of this message made constant so variables and content format cannot change.
@abstractmethod
def __call__(**kwargs: Any) -> AnyA method to run content through to get variables or to put variables in to form a content.
def __str__() -> strReturns:
The message content if defined, otherwise the message content format.
class InputMessage(Message)A class to represent a message that takes variables as inputs to construct.
def __init__(content_format: str,
role: Optional[str] = Role.USER,
custom_insert_variables_func: Optional[Callable[[Dict[str, Any]],
str]] = None)Initializes the InputMessage class with the given parameters.
Arguments:
content_format: A f-string format for the message content.role: Role associated with the message (default is None).custom_insert_variables_func: A custom function to insert variables into the message content. Takes the content_format and a dictionary of variables and returns the message content.
def __call__(**kwargs: Any) -> strGet the message content with inserted variables.
Arguments:
kwargs: A dictionary containing variable values.
Returns:
The message content with inserted variables.
def insert_variables(variables: Dict[str, Any]) -> strInsert variables into the message content.
Arguments:
variables: A dictionary containing variable values.
Returns:
The message content with inserted variables.
class OutputMessage(Message)A class to represent a message that outputs variables from its message content.
Limitations:
- Variables must be separated. Regex pattern used: (?P<{}>[\s\S]*)
def __init__(
content_format: str,
role: Optional[str] = Role.ASSISTANT,
custom_extract_variables_func: Optional[Callable[[List[str], str, str],
Dict[str, Any]]] = None)Initializes the OutputMessage class with the given parameters.
Arguments:
content_format: A f-string format for the message content.role: Role associated with the message (default is None).custom_extract_variables_func: A custom function to extract variables from the message content. Takes a list of variable names, the content format, and the message content. Returns a dictionary containing the extracted variables.
def __call__(**kwargs: Any) -> Dict[str, Any]Extract variables from the message content.
Arguments:
kwargs: A dictionary containing the message content.
Returns:
A dictionary containing the extracted variables.
def extract_variables(content) -> Dict[str, Any]Extract variables from the message content.
Arguments:
content: The message content to extract variables from.
Returns:
A dictionary containing the extracted variables.
class InputJSONMessage(InputMessage)A class to represent a message that takes JSON dict keys-values as inputs to construct.
Limitations:
- Sub-dictionaries are accessed by periods and replaced with underscores in processing, so name conflicts can occur.
def __init__(content_format: str,
role: Optional[str] = Role.USER,
expected_input_varnames: Optional[Set[str]] = None)Initializes the InputJSONMessage class with the given parameters.
Arguments:
content_format: A f-string format for the message content.role: Role associated with the message (default is None).expected_input_varnames: A set of expected input variable names.
def insert_variables_into_json(content_format: str,
variables: Dict[str, Any]) -> strInsert variables from dict into the message content.
Arguments:
content_format: The message content format.variables: A dictionary containing variable values.
Returns:
The message content with inserted variables.
class OutputJSONMessage(OutputMessage)A class to represent a message that outputs JSON dict keys-values from its message content.
Limitations:
- Only supports JSON dicts as outputs.
- Regex patterns do not necessarily match every content_format possible.
def __init__(content_format: str, role: Optional[str] = Role.ASSISTANT)Initializes the OutputJSONMessage class with the given parameters.
Arguments:
content_format: A f-string format for the message content.role: Role associated with the message (default is None).
def extract_variables_from_json(names: List[str], content_format: str,
content: str) -> Dict[str, Any]Extract JSON Dict from the message content.
Arguments:
names: A list of variable names.content_format: The message content format.content: The message content to extract variables from.
Returns:
A dictionary containing the extracted variables.
class OutputOptions(OutputMessage)A wrapper class to represent a message with multiple OutputMessage options.
This class will try each OutputMessage sequentially until it does not raise an ExtractionError. It will return the extracted variables from the successful OutputMessage along with all the other variables that were not in that output with an empty string.
def __init__(output_messages: List[OutputMessage],
role: Optional[str] = Role.ASSISTANT)Initializes the OutputOptions class with the given parameters.
Arguments:
output_messages: A list of OutputMessage instances.role: Role associated with the message (default is None).
@property
def output_messages() -> List[OutputMessage]Returns the list of OutputMessage instances.
Returns:
The list of OutputMessage instances.
def extract_variables(content) -> Dict[str, Any]Extract variables from the message content.
Arguments:
content: The message content to extract variables from.
Returns:
A dictionary containing the extracted variables.
class GoogleSearchTool(BaseTool)def __init__(api_key: str,
cse_id: str,
num_results: int = 10,
failed_search_result: str = "No google search results found.",
join_snippets: Optional[str] = "\n") -> NoneInitialize the GoogleSearchTool.
Arguments:
api_key: The Google API key.cse_id: The Google Custom Search Engine ID.num_results: The max number of results to return.failed_search_result: The result to return if the search fails.join_snippets: The string to join the snippets with. If None, the snippets will be returned as a list.
def search(query: str) -> Optional[list]Arguments:
query: The query to search for.
Returns:
The search results.
def use(inputs: Dict[str, str]) -> Union[str, list[str]]Arguments:
inputs: The inputs to the tool. Must contain a 'query' key.
Returns:
The output of the tool: Google search snippets.
class FileTool(BaseTool)def __init__(path: str) -> NoneInitialize the FileTool.
Arguments:
path: The path to the file.
def check_required_inputs(required_inputs: list[str],
inputs: list[str]) -> strChecks if the required inputs are in the inputs list. If not, raises a ValueError.
Arguments:
required_inputs: The required inputs.inputs: The inputs to check.
def get_closest_command(incorrect_command: str)Returns the closest command to an incorrectly inputted command
Arguments:
incorrect_command: The command that was inputted incorrectly.
Returns:
The closest possible command.
def use(inputs: Dict[str, str]) -> Union[str, list[str]]Arguments:
inputs: The inputs to the tool. Must contain a 'command' key. Depending on the 'command' value, other keys will be required as follows: [read, r]: file [write, w]: file, data [append, a]: file, data [cd, chdir, change directory]: path [ls, list]: path [mkdir, make directory]: path
Returns:
The output of the tool: The contents of the file.