decision.rs
use llm_client::prelude::*;
/// Runs multiple reasoning requests across a temperature gradient, returning the winning answer.
/// Currently, decision is bound to the one_round reasoning workflow.
/// Workflows relying on grammars are only supported by local LLMs.
#[tokio::main(flavor = "current_thread")]
pub async fn main() {
    // Using a preset model from Hugging Face
    let llm_client = LlmClient::llama_cpp()
        .mistral7b_instruct_v0_3()
        .init()
        .await
        .unwrap();

    // A boolean decision request
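    // The decision workflow runs multiple rounds of reasoning and returns the winning vote, resolved here to a plain `bool`.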
    let response = llm_client
        .reason()
        .boolean()
        .decision()
        .set_instructions("Is the sky blue?")
        .return_primitive()
        .await
        .unwrap();
    assert!(response);

    // An integer decision request
    let mut reason_request = llm_client.reason().integer();
    // Settings specific to the primitive can be accessed through the primitive field
    reason_request.primitive.lower_bound(0).upper_bound(10);
    let mut decision_request = reason_request.decision();
    let response = decision_request
        .set_instructions("How many fingers do you have?")
        .return_primitive()
        .await
        .unwrap();
    assert_eq!(response, 5);

    // Options
    let mut decision_request = llm_client.reason().integer().decision();
    // Set the number of 'votes', or rounds of reasoning, to be conducted
    decision_request.best_of_n_votes(5);
    // Uses a temperature gradient for each round of reasoning
    decision_request.dynamic_temperature(true);

    // An integer request, but with an optional response
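    // `return_optional_primitive` yields `None` when the model cannot settle on an answer, rather than forcing a guess.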
    let response = decision_request
        .set_instructions("How many coins are in my pocket?")
        .return_optional_primitive()
        .await
        .unwrap();
    assert_eq!(response, None);
}