(feat) Component code gen: updating to o1-mini model, upgrading langchain* API (#13938)
dylburger authored Sep 12, 2024
1 parent d81cb68 commit a46999e
Showing 5 changed files with 1,425 additions and 1,075 deletions.
5 changes: 1 addition & 4 deletions packages/component_code_gen/.env.example
@@ -3,9 +3,6 @@ BROWSERLESS_API_KEY=your-browserless-api-key
 SUPABASE_URL=https://your-supabase-url.supabase.co
 SUPABASE_API_KEY=your-supabase-service-role-key

-OPENAI_API_TYPE=azure
-OPENAI_DEPLOYMENT_NAME=deployment-name
-OPENAI_API_BASE=https://resource-name.openai.azure.com
 OPENAI_API_KEY=azure-api-key
-OPENAI_MODEL=gpt-4-32k
+OPENAI_MODEL=o1-preview
 OPENAI_EMBEDDINGS_MODEL=text-embedding-3-small
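
For reference, a minimal sketch of how a caller might consume these variables with python-dotenv (the helpers changed later in this commit call load_dotenv(); the actual wiring in config.py may differ):

# Minimal sketch (assumed usage, not the repo's exact code): load the .env
# values above and fall back to the new default model name.
import os
from dotenv import load_dotenv

load_dotenv()  # reads OPENAI_API_KEY, OPENAI_MODEL, etc. from .env

openai_api_key = os.environ.get("OPENAI_API_KEY")
openai_model = os.environ.get("OPENAI_MODEL", "o1-preview")
print(f"Using model: {openai_model}")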
10 changes: 1 addition & 9 deletions packages/component_code_gen/config/config.py
@@ -22,15 +22,7 @@ def get_env_var(var_name, required=False, default=None):
     "openai_embeddings_model": openai_embeddings_model,
     "openai": {
         "api_key": get_env_var("OPENAI_API_KEY", required=openai_api_type == "openai"),
-        "model": get_env_var("OPENAI_MODEL", default="gpt-4-0125-preview"),
-    },
-    "azure": {
-        "deployment_name": get_env_var("OPENAI_DEPLOYMENT_NAME", required=openai_api_type == "azure"),
-        "embeddings_deployment_name": get_env_var("OPENAI_EMBEDDINGS_DEPLOYMENT_NAME", required=openai_api_type == "azure"),
-        "api_version": get_env_var("OPENAI_API_VERSION", default="2024-03-01-preview"),
-        "api_base": get_env_var("OPENAI_API_BASE", required=openai_api_type == "azure"),
-        "api_key": get_env_var("OPENAI_API_KEY", required=openai_api_type == "azure"),
-        "model": get_env_var("OPENAI_MODEL", default="gpt-4-32k"),
+        "model": get_env_var("OPENAI_MODEL", default="o1-mini"),
     },
     "browserless": {
         "api_key": get_env_var("BROWSERLESS_API_KEY"),
55 changes: 19 additions & 36 deletions packages/component_code_gen/helpers/langchain_helpers.py
@@ -1,19 +1,16 @@
 from templates.common.suffix import suffix
 from templates.common.format_instructions import format_instructions
 from templates.common.docs_system_instructions import docs_system_instructions
-from langchain.schema import (
-    # AIMessage,
-    HumanMessage,
-    SystemMessage
-)
-from langchain.tools.json.tool import JsonSpec
-from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
-from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
-from langchain.llms.openai import OpenAI
-from langchain.agents import create_json_agent, ZeroShotAgent, AgentExecutor
+from langchain.schema import HumanMessage
+from langchain.agents.react.agent import create_react_agent
+from langchain_community.agent_toolkits import JsonToolkit, create_json_agent
+from langchain_community.tools.json.tool import JsonSpec

-import openai
+from langchain_openai.chat_models.base import ChatOpenAI
+from langchain.agents import ZeroShotAgent, AgentExecutor
 from langchain.chains import LLMChain
 from config.config import config
+import openai  # required
 from dotenv import load_dotenv
 load_dotenv()
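
These import changes track the langchain 0.1+ package split, where community integrations moved to langchain_community and the OpenAI chat model to the langchain_openai package. A minimal sketch of the new-style usage (the model name is illustrative; this is not code from the commit):

# Post-split import style (assumes the langchain-openai package is installed).
from langchain.schema import HumanMessage
from langchain_openai.chat_models.base import ChatOpenAI

llm = ChatOpenAI(model_name="o1-mini", temperature=1)      # model name is illustrative
reply = llm.invoke([HumanMessage(content="Say 'ok'.")])    # chat models are now called via .invoke()
print(reply.content)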

@@ -32,22 +29,15 @@ def __init__(self, docs, templates, auth_example, parsed_common_files):
         system_instructions = format_template(
             f"{templates.system_instructions(auth_example, parsed_common_files)}\n{docs_system_instructions}")

+        model = ChatOpenAI(model_name=config['openai']['model'])
         tools = OpenAPIExplorerTool.create_tools(docs)
-        tool_names = [tool.name for tool in tools]
-
-        prompt_template = ZeroShotAgent.create_prompt(
-            tools=tools,
-            prefix=system_instructions,
-            suffix=suffix,
-            format_instructions=format_instructions,
-            input_variables=['input', 'agent_scratchpad']
-        )
-
-        llm_chain = LLMChain(llm=get_llm(), prompt=prompt_template)
-        agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
-        verbose = True if config['logging']['level'] == 'DEBUG' else False
-
-        self.agent_executor = AgentExecutor.from_agent_and_tools(
+
+        # o1-preview doesn't support system instruction, so we just concatenate into the prompt
+        prompt = f"{system_instructions}\n\n{format_instructions}"
+
+        agent = create_react_agent(model, tools, prompt)
+        verbose = True if config['logging']['level'] == 'DEBUG' else False
+        self.agent_executor = AgentExecutor(
             agent=agent, tools=tools, verbose=verbose)

     def run(self, input):
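
One note on the new agent wiring, separate from the diff itself: langchain's create_react_agent expects a prompt template exposing the ReAct placeholders, so typical (hypothetical, not this repo's) setup looks roughly like this:

# Generic ReAct wiring sketch (hypothetical, not code from this commit): the
# prompt template passed to create_react_agent needs {tools}, {tool_names},
# {input}, and {agent_scratchpad} placeholders.
from langchain_core.prompts import PromptTemplate

react_prompt = PromptTemplate.from_template(
    "Answer the question using the tools available.\n\n"
    "{tools}\n\n"
    "Use the format: Thought, Action (one of [{tool_names}]), Action Input, "
    "Observation, ..., Final Answer.\n\n"
    "Question: {input}\n"
    "{agent_scratchpad}"
)
# agent = create_react_agent(model, tools, react_prompt)
# executor = AgentExecutor(agent=agent, tools=tools, verbose=True)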
@@ -87,15 +77,9 @@ def create_user_prompt(prompt, urls_content):


 def get_llm():
-    if config['openai_api_type'] == "azure":
-        azure_config = config["azure"]
-        return AzureChatOpenAI(deployment_name=azure_config['deployment_name'],
-                               model_name=azure_config["model"], temperature=config["temperature"], request_timeout=300)
-    else:
-        openai_config = config["openai"]
-        print(f"Using OpenAI API: {openai_config['model']}")
-        return ChatOpenAI(
-            model_name=openai_config["model"], temperature=config["temperature"])
+    openai_config = config["openai"]
+    print(f"Using OpenAI API: {openai_config['model']}")
+    return ChatOpenAI(model_name=openai_config["model"], temperature=1)
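
Here the Azure branch is dropped and temperature is pinned to 1, which matches the constraint that OpenAI's o1-series models only accept the default temperature. A hedged sketch of the effective behavior after this change:

# Sketch of the post-change behavior (assumes config["openai"]["model"] is an
# o1-series model, which rejects non-default sampling parameters).
from langchain_openai.chat_models.base import ChatOpenAI

def get_llm_sketch(model_name="o1-mini"):
    # temperature stays at 1 for o1-* models; other values raise an API error
    return ChatOpenAI(model_name=model_name, temperature=1)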


def ask_agent(prompt, docs, templates, auth_example, parsed_common_files, urls_content):
@@ -111,8 +95,7 @@ def no_docs(prompt, templates, auth_example, parsed_common_files, urls_content,
     pd_instructions = format_template(
         templates.system_instructions(auth_example, parsed_common_files))

-    result = get_llm()(messages=[
-        SystemMessage(content="You are the most intelligent software engineer in the world. You carefully provide accurate, factual, thoughtful, nuanced code, and are brilliant at reasoning. Follow all of the instructions below — they are all incredibly important. This code will be shipped directly to production, so it's important that it's accurate and complete."),
+    result = get_llm().invoke([
         HumanMessage(content=user_prompt +
                      pd_instructions if normal_order else pd_instructions+user_prompt),
     ])
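
This hunk also reflects the langchain API shift from calling a chat model directly to calling .invoke(), and it drops the SystemMessage because, as the commit's own comment notes, the o1 models don't accept a system instruction. A rough sketch of the old versus new call style (illustrative, not the repo's code):

# Illustrative old vs. new call style for langchain chat models.
from langchain.schema import HumanMessage
from langchain_openai.chat_models.base import ChatOpenAI

llm = ChatOpenAI(model_name="o1-mini", temperature=1)   # model name is illustrative
messages = [HumanMessage(content="Generate a component stub.")]

# old style (pre-upgrade): result = llm(messages=messages)
result = llm.invoke(messages)   # new style
print(result.content)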