-
-
Notifications
You must be signed in to change notification settings - Fork 0
/
prompt.rb
69 lines (56 loc) · 1.56 KB
/
prompt.rb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
# Builds a retrieval-augmented prompt: searches indexed documents matching
# the user's question, packs as many results as fit into the model's token
# budget, then asks OpenAI to answer using only that retrieved context.
class Prompt
  include ActiveModel::Model

  MODEL = "gpt-3.5-turbo".freeze
  # Token budget reserved for the retrieved context. Leaves headroom for the
  # instruction, the question, and the model's reply within the 4k window.
  MAX_CONTEXT_TOKENS = 3800

  class_attribute :config, :elastic_search

  attr_accessor :text, :filters, :response, :context

  # @param attributes [Hash] :text — the question to answer;
  #   :filters — result types to search (defaults to every repository's type)
  def initialize(attributes = {})
    self.filters = attributes.fetch(:filters, Repository.all.map(&:result_type))
    self.context ||= ""
    super
  end

  # Memoized entry point: runs the search + chat completion at most once.
  def result
    @result ||= perform
  end

  # Search results for the question, constrained to +filters+.
  # Memoized with ||= — previously `@documents =` re-ran the search on every
  # call, so build_context performed one Elasticsearch query per loop
  # iteration instead of a single query.
  def documents
    @documents ||= Search.new(q: text, filters: filters).perform
  end

  # Appends formatted documents to +context+, stopping at the first document
  # that would push the running token count past MAX_CONTEXT_TOKENS.
  def build_context
    documents.each do |document|
      snippet = document_for_result(document)
      break if token_count(context) + token_count(snippet) >= MAX_CONTEXT_TOKENS
      self.context += snippet
    end
  end

  # Runs the RAG flow: assemble context, then ask the model.
  # Returns the assistant's reply text, or nil when +text+ is blank.
  def perform
    return if text.blank?

    build_context
    reply = client.chat(
      parameters: {
        model: MODEL,
        messages: [{role: "user", content: content}],
        temperature: 0.0 # deterministic output for repeatable answers
      }
    )
    self.response = reply.dig("choices", 0, "message", "content")
  end

  private

  # Final prompt body: retrieved context followed by the instruction + question.
  def content
    <<~CONTENT
      #{context}
      Using only the information provided above, answer the following question and provide the url where the answer can be found: #{text}
    CONTENT
  end

  # Markdown snippet for one search result: linked title plus description.
  def document_for_result(result)
    "[#{result.name}](#{result.url})\n\n#{result.description}\n\n"
  end

  # Number of tokens +string+ occupies under MODEL's tokenizer.
  def token_count(string)
    encoding.encode(string).size
  end

  def client
    @client ||= OpenAI::Client.new
  end

  def encoding
    @encoding ||= Tiktoken.encoding_for_model(MODEL)
  end
end