Building Custom LangChain Agents and Tools with Amazon Bedrock
Learn to build custom prompts and tools for LangChain agents.
# Fetch the sample project and install its Python dependencies.
git clone https://github.com/build-on-aws/amazon-bedrock-custom-langchain-agent.git
cd amazon-bedrock-custom-langchain-agent
pip install -r requirements.txt
# IAM role ARN that deployed Lambda functions will execute under.
export LAMBDA_ROLE=arn:aws:iam::ACCOUNT_ID:role/YourLambdaExecutionRole
# S3 bucket used to stage Lambda deployment zip packages.
export S3_BUCKET=your_s3_bucket_name_here
def well_arch_tool(query: str) -> Dict[str, Any]:
    """Returns text from AWS Well-Architected Framework related to the query."""
    # NOTE: the docstring above doubles as the tool description the agent sees.
    # Titan embeddings served through the shared Bedrock runtime client.
    titan_embeddings = BedrockEmbeddings(
        model_id="amazon.titan-embed-text-v1",
        client=bedrock_runtime,
    )
    # Load the pre-built FAISS index from disk and search it for the query.
    local_index = FAISS.load_local("local_index", titan_embeddings)
    matching_docs = local_index.similarity_search(query)
    return {"docs": matching_docs}
def create_lambda_function(
    code: str,
    function_name: str,
    description: str,
    has_external_python_libraries: bool,
    external_python_libraries: List[str],
    *,
    runtime: str = "python3.9",
    handler: str = "lambda_function.handler",
    timeout: int = 30,
) -> str:
    """
    Creates and deploys a Lambda Function, based on what the customer requested. Returns the name of the created Lambda function
    """
    # The docstring above is the tool description shown to the agent; keep it stable.
    # runtime/handler/timeout were previously hard-coded ("!!! HARD CODED !!!");
    # they are now keyword-only parameters with the original values as defaults.
    print("Creating Lambda function")
    # Build the deployment zip, bundling external libraries only when requested.
    if has_external_python_libraries:
        zipfile = lambda_funcs.create_deployment_package_with_dependencies(
            code, function_name, f"{function_name}.zip", external_python_libraries
        )
    else:
        zipfile = lambda_funcs.create_deployment_package_no_dependencies(
            code, function_name, f"{function_name}.zip"
        )
    try:
        # Stage the zip in S3 so create_function can reference it.
        zip_key = f"agentaws_resources/{function_name}.zip"
        s3.upload_file(zipfile, S3_BUCKET, zip_key)
        print(f"Uploaded zip to {S3_BUCKET}/{zip_key}")
        response = lambda_client.create_function(
            Code={
                "S3Bucket": S3_BUCKET,
                "S3Key": zip_key,
            },
            Description=description,
            FunctionName=function_name,
            Handler=handler,
            Timeout=timeout,
            Publish=True,
            Role=LAMBDA_ROLE,
            Runtime=runtime,
        )
        deployed_function = response["FunctionName"]
        # The returned string is fed back to the agent as an observation.
        user_response = f"The function {deployed_function} has been deployed, to the customer's AWS account. I will now provide my final answer to the customer on how to invoke the {deployed_function} function with boto3 and print the result."
        return user_response
    except ClientError as e:
        # Surface the AWS error to the agent so it can retry with corrections.
        print(e)
        return f"Error: {e}\n Let me try again..."
def setup_full_agent():
    """Wire up the Bedrock LLM, the two custom tools, memory, and the agent executor."""
    # Bedrock client and the LLM built on top of it.
    bedrock_runtime = setup_bedrock()
    claude_llm = initialize_llm(bedrock_runtime)

    # Expose the plain functions as structured tools for the agent.
    tools = [
        StructuredTool.from_function(well_arch_tool),
        StructuredTool.from_function(create_lambda_function),
    ]

    custom_prefix = """...
"""
    custom_suffix = """...
"""

    # Conversation history is injected into the prompt and persisted in memory.
    history_placeholder = MessagesPlaceholder(variable_name="chat_history")
    conversation_memory = ConversationBufferMemory(
        memory_key="chat_history", return_messages=True
    )

    return initialize_agent(
        tools,
        claude_llm,
        agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        agent_kwargs={
            "prefix": custom_prefix,
            "suffix": custom_suffix,
            "memory_prompts": [history_placeholder],
            "input_variables": ["input", "agent_scratchpad", "chat_history"],
        },
        memory=conversation_memory,
        verbose=True,
    )
Run `python test_agent.py` from your terminal. The `@st.cache_resource` decorator comes to the rescue, letting us cache the agent for faster interactions.
@st.cache_resource  # cache the agent so it is built once, not on every Streamlit rerun
def load_llm():
    """Build (and cache) the full agent executor."""
    return test_agent.setup_full_agent()


model = load_llm()
We use `StreamlitCallbackHandler` to visualize how the agent picks its tools based on user queries.
if prompt := st.chat_input("How can I help??"):
    # Record and render the user's turn.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        # Callback renders the agent's tool-selection steps live in the container.
        st_callback = StreamlitCallbackHandler(st.container())
        result = test_agent.interact_with_agent_st(
            model, prompt, st.session_state.messages, st_callback
        )

        # Simulate stream of response with milliseconds delay
        full_response = ""
        for chunk in result.split():
            full_response += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)

    st.session_state.messages.append({"role": "assistant", "content": full_response})
- Initialized Amazon Bedrock for our foundation models
- Developed tools for querying the AWS Well-Architected Framework and deploying Lambda functions
- Created a LangChain agent with a well-defined prompt and integrated it with our tools
- Designed a Streamlit chatbot that brings our agent to life
Any opinions in this post are those of the individual author and may not reflect the opinions of AWS.