Commit c11c43c
Feature/OpenAI Tool Agent (#1991)
add openai tool agent
HenryHengZJ committed Mar 19, 2024
1 parent ec1bbc8 commit c11c43c
Showing 9 changed files with 435 additions and 1,029 deletions.
@@ -14,7 +14,7 @@ import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'

-class MistralAIFunctionAgent_Agents implements INode {
+class MistralAIToolAgent_Agents implements INode {
    label: string
    name: string
    version: number
@@ -28,14 +28,14 @@ class MistralAIFunctionAgent_Agents implements INode {
    badge?: string

    constructor(fields?: { sessionId?: string }) {
-        this.label = 'MistralAI Function Agent'
-        this.name = 'mistralAIFunctionAgent'
+        this.label = 'MistralAI Tool Agent'
+        this.name = 'mistralAIToolAgent'
        this.version = 1.0
        this.type = 'AgentExecutor'
        this.category = 'Agents'
        this.icon = 'MistralAI.svg'
        this.badge = 'NEW'
-        this.description = `An agent that uses MistralAI Function Calling to pick the tool and args to call`
+        this.description = `Agent that uses MistralAI Function Calling to pick the tools and args to call`
        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
        this.inputs = [
            {
@@ -204,4 +204,4 @@ const prepareAgent = (
    return executor
}

-module.exports = { nodeClass: MistralAIFunctionAgent_Agents }
+module.exports = { nodeClass: MistralAIToolAgent_Agents }
@@ -23,6 +23,7 @@ class OpenAIFunctionAgent_Agents implements INode {
    category: string
    baseClasses: string[]
    inputs: INodeParams[]
+    badge?: string
    sessionId?: string

    constructor(fields?: { sessionId?: string }) {
@@ -32,8 +33,9 @@ class OpenAIFunctionAgent_Agents implements INode {
        this.type = 'AgentExecutor'
        this.category = 'Agents'
        this.icon = 'function.svg'
-        this.description = `An agent that uses Function Calling to pick the tool and args to call`
+        this.description = `An agent that uses OpenAI Function Calling to pick the tool and args to call`
        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
+        this.badge = 'DEPRECATING'
        this.inputs = [
            {
                label: 'Allowed Tools',
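The DEPRECATING badge tracks OpenAI's shift from the legacy functions API to the tools API, which the new Tool Agent nodes in this commit target via convertToOpenAITool. As a rough illustration (the tool name and empty parameter schema below are made up for this note, not taken from the commit), the two request shapes differ roughly like this:

// OpenAI "tools" schema, as emitted by convertToOpenAITool for each bound tool:
const toolEntry = {
    type: 'function',
    function: {
        name: 'current_time', // illustrative tool name
        description: 'Returns the current time',
        parameters: { type: 'object', properties: {} }
    }
}

// Legacy "functions" schema used by the function-calling agents being deprecated:
const functionEntry = {
    name: 'current_time',
    description: 'Returns the current time',
    parameters: { type: 'object', properties: {} }
}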
205 changes: 205 additions & 0 deletions packages/components/nodes/agents/OpenAIToolAgent/OpenAIToolAgent.ts
@@ -0,0 +1,205 @@
import { flatten } from 'lodash'
import { BaseMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatOpenAI } from '@langchain/openai'
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { convertToOpenAITool } from '@langchain/core/utils/function_calling'
import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
import { OpenAIToolsAgentOutputParser, type ToolsAgentStep } from 'langchain/agents/openai/output_parser'
import { getBaseClasses } from '../../../src/utils'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, IUsedTool } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor } from '../../../src/agents'
import { Moderation, checkInputs } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'

class OpenAIToolAgent_Agents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]
    sessionId?: string
    badge?: string

    constructor(fields?: { sessionId?: string }) {
        this.label = 'OpenAI Tool Agent'
        this.name = 'openAIToolAgent'
        this.version = 1.0
        this.type = 'AgentExecutor'
        this.category = 'Agents'
        this.icon = 'function.svg'
        this.description = `Agent that uses OpenAI Function Calling to pick the tools and args to call`
        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
        this.badge = 'NEW'
        this.inputs = [
            {
                label: 'Tools',
                name: 'tools',
                type: 'Tool',
                list: true
            },
            {
                label: 'Memory',
                name: 'memory',
                type: 'BaseChatMemory'
            },
            {
                label: 'OpenAI/Azure Chat Model',
                name: 'model',
                type: 'BaseChatModel'
            },
            {
                label: 'System Message',
                name: 'systemMessage',
                type: 'string',
                rows: 4,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
                name: 'inputModeration',
                type: 'Moderation',
                optional: true,
                list: true
            }
        ]
        this.sessionId = fields?.sessionId
    }

    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
        const memory = nodeData.inputs?.memory as FlowiseMemory
        const moderations = nodeData.inputs?.inputModeration as Moderation[]

        if (moderations && moderations.length > 0) {
            try {
                // Use the output of the moderation chain as input for the OpenAI Function Agent
                input = await checkInputs(moderations, input)
            } catch (e) {
                await new Promise((resolve) => setTimeout(resolve, 500))
                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
                return formatResponse(e.message)
            }
        }

        const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)

        const loggerHandler = new ConsoleCallbackHandler(options.logger)
        const callbacks = await additionalCallbacks(nodeData, options)

        let res: ChainValues = {}
        let sourceDocuments: ICommonObject[] = []
        let usedTools: IUsedTool[] = []

        if (options.socketIO && options.socketIOClientId) {
            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
            if (res.sourceDocuments) {
                options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
                sourceDocuments = res.sourceDocuments
            }
            if (res.usedTools) {
                options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
                usedTools = res.usedTools
            }
        } else {
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
            if (res.sourceDocuments) {
                sourceDocuments = res.sourceDocuments
            }
            if (res.usedTools) {
                usedTools = res.usedTools
            }
        }

        await memory.addChatMessages(
            [
                {
                    text: input,
                    type: 'userMessage'
                },
                {
                    text: res?.output,
                    type: 'apiMessage'
                }
            ],
            this.sessionId
        )

        let finalRes = res?.output

        if (sourceDocuments.length || usedTools.length) {
            finalRes = { text: res?.output }
            if (sourceDocuments.length) {
                finalRes.sourceDocuments = flatten(sourceDocuments)
            }
            if (usedTools.length) {
                finalRes.usedTools = usedTools
            }
            return finalRes
        }

        return finalRes
    }
}

const prepareAgent = (
    nodeData: INodeData,
    flowObj: { sessionId?: string; chatId?: string; input?: string },
    chatHistory: IMessage[] = []
) => {
    const model = nodeData.inputs?.model as ChatOpenAI
    const memory = nodeData.inputs?.memory as FlowiseMemory
    const systemMessage = nodeData.inputs?.systemMessage as string
    let tools = nodeData.inputs?.tools
    tools = flatten(tools)
    const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
    const inputKey = memory.inputKey ? memory.inputKey : 'input'

    const prompt = ChatPromptTemplate.fromMessages([
        ['system', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
        new MessagesPlaceholder(memoryKey),
        ['human', `{${inputKey}}`],
        new MessagesPlaceholder('agent_scratchpad')
    ])

    const modelWithTools = model.bind({ tools: tools.map(convertToOpenAITool) })

    const runnableAgent = RunnableSequence.from([
        {
            [inputKey]: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
            agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) => formatToOpenAIToolMessages(i.steps),
            [memoryKey]: async (_: { input: string; steps: ToolsAgentStep[] }) => {
                const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
                return messages ?? []
            }
        },
        prompt,
        modelWithTools,
        new OpenAIToolsAgentOutputParser()
    ])

    const executor = AgentExecutor.fromAgentAndTools({
        agent: runnableAgent,
        tools,
        sessionId: flowObj?.sessionId,
        chatId: flowObj?.chatId,
        input: flowObj?.input,
        verbose: process.env.DEBUG === 'true' ? true : false
    })

    return executor
}

module.exports = { nodeClass: OpenAIToolAgent_Agents }
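
For readers unfamiliar with the pattern, the node above composes the standard LangChain JS OpenAI tools agent: a prompt with chat-history and agent_scratchpad placeholders, a chat model with tools bound through convertToOpenAITool, OpenAIToolsAgentOutputParser to turn tool calls into agent steps, and an AgentExecutor to run the loop. A minimal standalone sketch of the same wiring follows; it is illustrative only: it uses stock langchain's AgentExecutor instead of Flowise's subclass, skips memory and moderation, and the current_time tool and model settings are assumptions rather than anything in this commit.

import { ChatOpenAI } from '@langchain/openai'
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { RunnableSequence } from '@langchain/core/runnables'
import { convertToOpenAITool } from '@langchain/core/utils/function_calling'
import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
import { OpenAIToolsAgentOutputParser, type ToolsAgentStep } from 'langchain/agents/openai/output_parser'
import { AgentExecutor } from 'langchain/agents'
import { DynamicTool } from 'langchain/tools'

// Hypothetical tool, purely for illustration
const tools = [
    new DynamicTool({
        name: 'current_time',
        description: 'Returns the current time as an ISO string',
        func: async () => new Date().toISOString()
    })
]

const prompt = ChatPromptTemplate.fromMessages([
    ['system', 'You are a helpful AI assistant.'],
    new MessagesPlaceholder('chat_history'),
    ['human', '{input}'],
    new MessagesPlaceholder('agent_scratchpad')
])

// Bind the tools to the model in OpenAI's tools schema, as prepareAgent does above
const model = new ChatOpenAI({ modelName: 'gpt-3.5-turbo', temperature: 0 })
const modelWithTools = model.bind({ tools: tools.map(convertToOpenAITool) })

const agent = RunnableSequence.from([
    {
        input: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
        agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) => formatToOpenAIToolMessages(i.steps),
        chat_history: async () => [] // no persisted memory in this sketch
    },
    prompt,
    modelWithTools,
    new OpenAIToolsAgentOutputParser()
])

const executor = AgentExecutor.fromAgentAndTools({ agent, tools, verbose: true })

// Usage (inside an async context):
// const res = await executor.invoke({ input: 'What time is it right now?' })
// console.log(res.output)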
9 changes: 9 additions & 0 deletions packages/components/nodes/agents/OpenAIToolAgent/function.svg
(SVG icon file; contents not rendered in the diff view.)
