Working With Your Live Data Using LangChain
Use LangChain with Amazon Bedrock and Amazon DynamoDB to build applications that hold consistent conversations with LLMs and engage in a natural dialogue
1
pip install langchain
1
from langchain.llms.bedrock import Bedrock
1
2
3
import boto3  # required: this snippet uses boto3 but never imported it

# Create the low-level Bedrock runtime client used for model invocations
# (both directly via invoke_model and indirectly through LangChain's Bedrock LLM).
bedrock_client = boto3.client(
    service_name='bedrock-runtime'
)
# Pass verbose=True to the chain to log its internal prompts and states.
verbose = True
to enable debug output and see the internal states of the Chain:
2
3
4
5
6
from langchain.chains import ConversationChain

# Inference parameters for the Claude v2 model.
model_parameter = {
    "temperature": 0.0,            # deterministic sampling
    "top_p": .5,                   # nucleus-sampling cutoff
    "max_tokens_to_sample": 2000,  # cap on generated tokens
}

# Wrap the Bedrock-hosted model as a LangChain LLM.
llm = Bedrock(
    model_id="anthropic.claude-v2",
    model_kwargs=model_parameter,
    client=bedrock_client,
)

# Build a conversation chain around it; verbose=True logs internal state.
conversation = ConversationChain(llm=llm, verbose=True)
1
# Send a first message through the chain; returns the model's reply as a string.
conversation.predict(input="Hello world!")
1
2
3
4
5
6
7
8
9
10
11
import json  # required: this snippet parses the response with json but never imported it

prompt = "Hello world!"

# Build the request body with json.dumps instead of string concatenation:
# concatenating the raw prompt into a JSON string produces invalid JSON as
# soon as the prompt contains quotes, backslashes, or newlines.
body = json.dumps({
    "prompt": f"Human:{prompt}\nAssistant:",
    "maxTokens": 200,
    "temperature": 0.7,
    "topP": 1,
    "stopSequences": [],
    "countPenalty": {"scale": 0},
    "presencePenalty": {"scale": 0},
    "frequencyPenalty": {"scale": 0},
})

# Direct (non-LangChain) invocation of the AI21 Jurassic-2 Ultra model.
kwargs = {
    "modelId": "ai21.j2-ultra-v1",
    "contentType": "application/json",
    "accept": "*/*",
    "body": body,
}
response = bedrock_client.invoke_model(**kwargs)

# The response body is a streaming object; read and decode it, then pull the
# generated text out of the first completion.
response_body = json.loads(response.get("body").read())
completion = response_body.get("completions")[0].get("data").get("text")
completion
1
2
from langchain.memory import ConversationBufferMemory
# Buffer memory: keeps the full conversation history verbatim.
# return_messages=True returns the history as a list of message objects.
memory = ConversationBufferMemory(return_messages=True)
1
2
from langchain.memory import ConversationBufferWindowMemory
# Window memory: keeps only the last k conversation turns.
# Fix: the original instantiated ConversationBufferMemory here, so the
# imported window class (and its k parameter) was never actually used.
memory = ConversationBufferWindowMemory(k=1, return_messages=True)
1
2
from langchain.memory import ConversationSummaryMemory
# Summary memory: uses the supplied llm to condense the conversation into a
# running summary rather than storing every turn verbatim.
memory = ConversationSummaryMemory(llm=llm, return_messages=True)
1
2
from langchain.memory import ConversationSummaryBufferMemory
# Summary-buffer memory: keeps recent turns verbatim up to max_token_limit
# tokens and uses the llm to summarize anything older.
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=10,return_messages=True)
1
2
from langchain.memory import ConversationTokenBufferMemory
# Token-buffer memory: keeps the most recent history, trimmed so it never
# exceeds max_token_limit tokens (the llm is used for token counting).
memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10,return_messages=True)
📚Note: In all of the memory types above the parameter return_messages=True is present; this returns the history as a list of messages
1
2
3
4
5
#add the memory to the Chain
# Attaching a memory object makes the chain carry context across predict() calls.
conversation = ConversationChain(
llm=llm, verbose=True, memory=memory
)
1
2
3
4
5
# Three turns: the final question can only be answered correctly if the
# memory retained the name given in the first turn.
conversation.predict(input="Hi, my name is Elizabeth!")
conversation.predict(input="what's up?")
conversation.predict(input="cool, What is my name?")
memory.load_memory_variables({}) #To print the memory
- Create the Amazon DynamoDB:
1
2
3
4
5
6
7
8
9
10
import boto3  # required: this snippet uses boto3 but never imported it

# Get the service resource.
dynamodb = boto3.resource("dynamodb")

# Create the DynamoDB table keyed by SessionId, with on-demand billing.
table = dynamodb.create_table(
    TableName="SessionTable",
    KeySchema=[{"AttributeName": "SessionId", "KeyType": "HASH"}],
    AttributeDefinitions=[{"AttributeName": "SessionId", "AttributeType": "S"}],
    BillingMode="PAY_PER_REQUEST",
)

# create_table returns before the table is usable: block until it is ACTIVE
# so the chat-history writes that follow do not fail with
# ResourceNotFoundException.
table.wait_until_exists()
- Add Chat Memory To The Chain:
1
2
3
4
5
6
7
8
9
from langchain.memory.chat_message_histories import DynamoDBChatMessageHistory
# Persist the transcript in the SessionTable item keyed by session_id;
# reusing the same session_id later resumes the stored conversation.
message_history = DynamoDBChatMessageHistory(table_name="SessionTable", session_id="1")
# Buffer memory backed by the DynamoDB history; ai_prefix/human_prefix set
# the speaker labels ("A"/"H") used when the history is rendered into prompts.
memory = ConversationBufferMemory(
memory_key="history", chat_memory=message_history, return_messages=True,ai_prefix="A",human_prefix="H"
)
#add the memory to the Chain
conversation = ConversationChain(
llm=llm, verbose=True, memory=memory
)
- Try It!
1
2
3
4
5
6
7
8
9
# Same three-turn test as before, now with the history persisted in DynamoDB.
conversation.predict(input="Hi, my name is Elizabeth!")
conversation.predict(input="what's up?")
conversation.predict(input="cool, What is my name?")
# Print the memory
memory.load_memory_variables({})
# Print item count
# NOTE(review): DynamoDB refreshes item_count only periodically (roughly every
# six hours), so this may read 0 right after writing — confirm before relying on it.
print(table.item_count)
Any opinions in this post are those of the individual author and may not reflect the opinions of AWS.