Future-proof Your .NET Apps With Foundation Model Choice and Amazon Bedrock
Why .NET developers should consider Amazon Bedrock for their generative AI-powered apps, with getting-started code snippets and a full code sample.
- AWSSDK.Bedrock
- AWSSDK.BedrockRuntime
dotnet add package AWSSDK.Bedrock
dotnet add package AWSSDK.BedrockRuntime
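The two packages cover different planes of the service: AWSSDK.Bedrock wraps the management operations, such as listing the foundation models available in your Region, while AWSSDK.BedrockRuntime wraps the inference operations used in the rest of this post. As a minimal sketch of the control-plane client (the Region here is just an example, and credentials are resolved the usual AWS SDK way), listing models could look like this:

using Amazon;
using Amazon.Bedrock;
using Amazon.Bedrock.Model;

// Control-plane client from the AWSSDK.Bedrock package
AmazonBedrockClient bedrockClient = new(
    new AmazonBedrockConfig()
    {
        RegionEndpoint = RegionEndpoint.USEast1
    }
);

// List the foundation models available in this Region
ListFoundationModelsResponse models =
    await bedrockClient.ListFoundationModelsAsync(new ListFoundationModelsRequest());

foreach (var summary in models.ModelSummaries)
{
    Console.WriteLine($"{summary.ProviderName}: {summary.ModelId}");
}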
- InvokeModel
- InvokeModelWithResponseStream (a streaming sketch follows the full sample below)
using System.Text;
using System.Text.Json;
using Amazon;
using Amazon.BedrockRuntime;
using Amazon.BedrockRuntime.Model;

internal class Program
{
    static async Task Main(string[] args)
    {
        // Instantiate the client object
        AmazonBedrockRuntimeClient client = new(
            new AmazonBedrockRuntimeConfig()
            {
                RegionEndpoint = RegionEndpoint.USEast1
            }
        );
        // Instantiate the request object
        InvokeModelRequest request = new InvokeModelRequest()
        {
            // Set the ContentType property to indicate we are sending the inference parameters as a Json object. For a Stable Diffusion model, it could be 'image/png'
            ContentType = "application/json",
            // Set the Accept property to indicate we expect the result as a Json object. Again, for a Stable Diffusion model, it could also be 'image/png'
            Accept = "application/json",
            // Set the ModelId property to the foundation model you want to invoke. You can find the list of Model Ids you can use at https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids-arns.html
            ModelId = "anthropic.claude-v2",
            // Serialize to a MemoryStream the Json object containing the inference parameters expected by the model. Each foundation model expects a different set of inference parameters in different formats (application/json for most of them). It is up to you to know what you need to provide in this property and in which format.
            Body = new MemoryStream(
                Encoding.UTF8.GetBytes(
                    JsonSerializer.Serialize(new
                    {
                        prompt = "\n\nHuman:Explain how async/await work in .NET and provide a code example\n\nAssistant:",
                        max_tokens_to_sample = 2000
                    })
                )
            )
        };
        // Call the InvokeModelAsync method. It actually calls the InvokeModel action from the Amazon Bedrock API
        InvokeModelResponse response = await client.InvokeModelAsync(request);
        // Check the HttpStatusCode to ensure successful completion of the request
        if (response.HttpStatusCode == System.Net.HttpStatusCode.OK)
        {
            // Deserialize the returned Json object into a plain old C# object (POCO). Here, we use the ClaudeBodyResponse internal class defined below
            ClaudeBodyResponse? body = await JsonSerializer.DeserializeAsync<ClaudeBodyResponse>(
                response.Body,
                new JsonSerializerOptions()
                {
                    PropertyNameCaseInsensitive = true
                });
            // Write the completion string to the console
            Console.WriteLine(body?.Completion);
        }
        else
        {
            Console.WriteLine("Something went wrong");
        }
    }

    /// <summary>
    /// Internal POCO defining the Claude response
    /// </summary>
    internal class ClaudeBodyResponse
    {
        /// <summary>
        /// Claude text answer
        /// </summary>
        public string? Completion { get; set; }

        /// <summary>
        /// Reason the model has stopped generating text.
        /// It may have reached the maximum number of tokens to generate,
        /// or a state where the most likely next token is not a word
        /// but the end of the text.
        /// </summary>
        public string? Stop_Reason { get; set; }
    }
}
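The full sample above uses the InvokeModel action, which returns the whole completion in one response. For long completions, InvokeModelWithResponseStream instead returns the answer as a stream of Json chunks you can print as they arrive, reducing perceived latency. Below is a minimal sketch of the streaming variant, reusing the client, inference parameters, and ClaudeBodyResponse POCO from the sample above; the exact event-stream surface may vary slightly across versions of the AWSSDK.BedrockRuntime package.

// Instantiate the streaming request object with the same inference parameters
InvokeModelWithResponseStreamRequest streamingRequest = new()
{
    ContentType = "application/json",
    Accept = "application/json",
    ModelId = "anthropic.claude-v2",
    Body = new MemoryStream(
        Encoding.UTF8.GetBytes(
            JsonSerializer.Serialize(new
            {
                prompt = "\n\nHuman:Explain how async/await work in .NET\n\nAssistant:",
                max_tokens_to_sample = 2000
            })
        )
    )
};
// Call the InvokeModelWithResponseStreamAsync method. It calls the InvokeModelWithResponseStream action from the Amazon Bedrock API
InvokeModelWithResponseStreamResponse streamingResponse =
    await client.InvokeModelWithResponseStreamAsync(streamingRequest);
// The response body is an event stream; each PayloadPart event carries a Json chunk holding a partial completion
foreach (var item in streamingResponse.Body)
{
    if (item is PayloadPart part)
    {
        ClaudeBodyResponse? chunk = await JsonSerializer.DeserializeAsync<ClaudeBodyResponse>(
            part.Bytes,
            new JsonSerializerOptions() { PropertyNameCaseInsensitive = true });
        // Write each partial completion to the console as soon as it arrives
        Console.Write(chunk?.Completion);
    }
}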
Beyond the API, the Amazon Bedrock console provides several playgrounds to experiment with models without writing any code:
- A text playground allowing you to exercise your prompt engineering skills with different models
- A chat playground allowing you to chat with different models
- A voice chat playground allowing you to talk with different models using your voice
- An image playground allowing you to generate images from text prompts
Any opinions in this post are those of the individual author and may not reflect the opinions of AWS.