Merge pull request #8 from edgararuiz/updates

Adds Ollama tests

edgararuiz committed Sep 12, 2024
2 parents ef6472f + c775af9 commit 493525b
Showing 28 changed files with 498 additions and 60 deletions.
52 changes: 52 additions & 0 deletions .github/workflows/R-CMD-check.yaml
@@ -0,0 +1,52 @@
# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
on:
  push:
    branches: main
  pull_request:
    branches: main

name: R-CMD-check.yaml

permissions: read-all

jobs:
  R-CMD-check:
    runs-on: ${{ matrix.config.os }}

    name: ${{ matrix.config.os }} (${{ matrix.config.r }})

    strategy:
      fail-fast: false
      matrix:
        config:
          - {os: macos-latest, r: 'release'}
          - {os: windows-latest, r: 'release'}
          - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'}
          - {os: ubuntu-latest, r: 'release'}
          - {os: ubuntu-latest, r: 'oldrel-1'}

    env:
      GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
      R_KEEP_PKG_SOURCE: yes

    steps:
      - uses: actions/checkout@v4

      - uses: r-lib/actions/setup-pandoc@v2

      - uses: r-lib/actions/setup-r@v2
        with:
          r-version: ${{ matrix.config.r }}
          http-user-agent: ${{ matrix.config.http-user-agent }}
          use-public-rspm: true

      - uses: r-lib/actions/setup-r-dependencies@v2
        with:
          extra-packages: any::rcmdcheck
          needs: check

      - uses: r-lib/actions/check-r-package@v2
        with:
          upload-snapshots: true
          build_args: 'c("--no-manual","--compact-vignettes=gs+qpdf")'
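The check step runs the same `R CMD check` that CRAN uses. For local debugging before pushing, roughly the same check can be run from R with rcmdcheck — the package the workflow installs via `any::rcmdcheck`. A minimal sketch (the arguments are copied from the workflow's `build_args`; `error_on` is an assumption, not part of this workflow):

# Local equivalent of the CI check, run from the package root
install.packages("rcmdcheck")
rcmdcheck::rcmdcheck(
  path = ".",
  build_args = c("--no-manual", "--compact-vignettes=gs+qpdf"),
  error_on = "warning"  # fail early, similar in spirit to CI
)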
1 change: 1 addition & 0 deletions NAMESPACE
@@ -32,6 +32,7 @@ import(glue)
 import(rlang)
 importFrom(dplyr,bind_cols)
 importFrom(dplyr,mutate)
+importFrom(dplyr,pull)
 importFrom(dplyr,tibble)
 importFrom(jsonlite,fromJSON)
 importFrom(ollamar,chat)
2 changes: 1 addition & 1 deletion R/llm-classify.R
@@ -44,7 +44,7 @@ llm_classify.data.frame <- function(.data,
 llm_vec_classify <- function(x,
                              labels,
                              additional_prompt = "") {
-  llm_vec_prompt(
+  l_vec_prompt(
     x = x,
     prompt_label = "classify",
     additional_prompt = additional_prompt,
29 changes: 5 additions & 24 deletions R/llm-custom.R
@@ -40,28 +40,9 @@ llm_custom.data.frame <- function(.data,
 #' @rdname llm_custom
 #' @export
 llm_vec_custom <- function(x, prompt = "", valid_resps = NULL) {
-  llm_use(.silent = TRUE, force = FALSE)
-  if (!inherits(prompt, "list")) {
-    p_split <- strsplit(prompt, "\\{\\{x\\}\\}")[[1]]
-    if (length(p_split) == 1 && p_split == prompt) {
-      content <- glue("{prompt}\n{{x}}")
-    } else {
-      content <- prompt
-    }
-    prompt <- list(list(role = "user", content = content))
-  }
-  resp <- m_backend_submit(defaults_get(), x, prompt)
-  if (!is.null(valid_resps)) {
-    errors <- !resp %in% valid_resps
-    resp[errors] <- NA
-    if (any(errors)) {
-      cli_alert_warning(
-        c(
-          "There were {sum(errors)} predictions with ",
-          "invalid output, they were coerced to NA"
-        )
-      )
-    }
-  }
-  resp
+  l_vec_prompt(
+    x = x,
+    prompt = prompt,
+    valid_resps = valid_resps
+  )
 }
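With this refactor, `llm_vec_custom()` is a thin wrapper: the `{{x}}` placeholder handling, backend submission, and `valid_resps` checking all live in `l_vec_prompt()` (shown later in this diff). A hedged usage sketch — the model name is an assumption, and a local Ollama instance must already be running:

library(mall)
llm_use("ollama", "llama3.2", .silent = TRUE)  # model name assumed

# No "{{x}}" in the prompt, so each value is appended on a new line;
# answers outside valid_resps are coerced to NA with a warning.
llm_vec_custom(
  c("I love it", "It broke after a week"),
  prompt = "Answer Yes or No only: is this review positive?",
  valid_resps = c("Yes", "No")
)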
5 changes: 3 additions & 2 deletions R/llm-extract.R
@@ -31,8 +31,9 @@ llm_extract.data.frame <- function(.data,
                                    additional_prompt = "",
                                    pred_name = ".extract") {
   if (expand_cols && length(labels) > 1) {
+    text <- pull(.data, {{ col }})
     resp <- llm_vec_extract(
-      x = .data$col,
+      x = text,
       labels = labels,
       additional_prompt = additional_prompt
     )
@@ -76,7 +77,7 @@ llm_extract.data.frame <- function(.data,
 llm_vec_extract <- function(x,
                             labels = c(),
                             additional_prompt = "") {
-  resp <- llm_vec_prompt(
+  resp <- l_vec_prompt(
     x = x,
     prompt_label = "extract",
     labels = labels,
2 changes: 1 addition & 1 deletion R/llm-sentiment.R
@@ -53,7 +53,7 @@ globalVariables("ai_analyze_sentiment")
 llm_vec_sentiment <- function(x,
                               options = c("positive", "negative", "neutral"),
                               additional_prompt = "") {
-  llm_vec_prompt(
+  l_vec_prompt(
     x = x,
     prompt_label = "sentiment",
     additional_prompt = additional_prompt,
2 changes: 1 addition & 1 deletion R/llm-summarize.R
@@ -52,7 +52,7 @@ globalVariables("ai_summarize")
 llm_vec_summarize <- function(x,
                               max_words = 10,
                               additional_prompt = "") {
-  llm_vec_prompt(
+  l_vec_prompt(
     x = x,
     prompt_label = "summarize",
     additional_prompt = additional_prompt,
2 changes: 1 addition & 1 deletion R/llm-translate.R
@@ -39,7 +39,7 @@ llm_vec_translate <- function(
     x,
     language,
     additional_prompt = "") {
-  llm_vec_prompt(
+  l_vec_prompt(
     x = x,
     prompt_label = "translate",
     additional_prompt = additional_prompt,
4 changes: 2 additions & 2 deletions R/llm-use.R
@@ -7,8 +7,8 @@
 #' @param .silent Avoids console output
 #' @param model The name of model supported by the back-end provider
 #' @param ... Additional arguments that this function will pass down to the
-#' integrating function. In the case of Ollama, it will pass those argument to
-#' `ollamar::generate()`.
+#' integrating function. In the case of Ollama, it will pass those arguments to
+#' `ollamar::chat()`.
 #' @param force Flag that tell the function to reset all of the settings in the
 #' R session
 #'
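Since the `...` arguments now reach `ollamar::chat()`, model options can be pinned once at setup time. A hedged sketch — `seed` and `temperature` are assumptions about options `chat()` forwards to Ollama, the kind of pinning useful for reproducible test output:

library(mall)
# Options after the model name are passed through to ollamar::chat()
llm_use("ollama", "llama3.2", seed = 100, temperature = 0)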
66 changes: 51 additions & 15 deletions R/m-backend-prompt.R
@@ -93,19 +93,55 @@ m_backend_prompt.mall_defaults <- function(backend, additional = "")
   )
 }
 
-get_prompt <- function(label, ..., .additional = "") {
-  defaults <- m_backend_prompt(defaults_get(), additional = .additional)
-  fn <- defaults[[label]]
-  fn(...)
-}
-
-
-llm_vec_prompt <- function(x,
-                           prompt_label = "",
-                           additional_prompt = "",
-                           valid_resps = NULL,
-                           ...) {
-  llm_use(.silent = TRUE, force = FALSE)
-  prompt <- get_prompt(prompt_label, ..., .additional = additional_prompt)
-  llm_vec_custom(x, prompt, valid_resps = valid_resps)
+l_vec_prompt <- function(x,
+                         prompt_label = "",
+                         additional_prompt = "",
+                         valid_resps = NULL,
+                         prompt = NULL,
+                         ...) {
+  # Initializes session LLM
+  backend <- llm_use(.silent = TRUE, force = FALSE)
+  # If there is no 'prompt', then assumes that we're looking for a
+  # prompt label (sentiment, classify, etc) to set 'prompt'
+  if (is.null(prompt)) {
+    defaults <- m_backend_prompt(
+      backend = backend,
+      additional = additional_prompt
+    )
+    fn <- defaults[[prompt_label]]
+    prompt <- fn(...)
+  }
+  # If the prompt is a character, it will convert it to
+  # a list so it can be processed
+  if (!inherits(prompt, "list")) {
+    p_split <- strsplit(prompt, "\\{\\{x\\}\\}")[[1]]
+    if (length(p_split) == 1 && p_split == prompt) {
+      content <- glue("{prompt}\n{{x}}")
+    } else {
+      content <- prompt
+    }
+    prompt <- list(
+      list(role = "user", content = content)
+    )
+  }
+  # Submits final prompt to the LLM
+  resp <- m_backend_submit(
+    backend = backend,
+    x = x,
+    prompt = prompt
+  )
+  # Checks for invalid output and marks them as NA
+  if (!is.null(valid_resps)) {
+    errors <- !resp %in% valid_resps
+    resp[errors] <- NA
+    if (any(errors)) {
+      cli_alert_warning(
+        c(
+          "There were {sum(errors)} predictions with ",
+          "invalid output, they were coerced to NA"
+        )
+      )
+    }
+  }
+  resp
 }
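The character-to-list branch is the subtle part: a prompt without a `{{x}}` placeholder gets the text appended on a new line, while a prompt containing `{{x}}` is kept as a template. A standalone sketch of just that branch (illustration only, not package code), using the same `glue` escaping as above:

library(glue)

prep_prompt <- function(prompt) {
  p_split <- strsplit(prompt, "\\{\\{x\\}\\}")[[1]]
  if (length(p_split) == 1 && p_split == prompt) {
    glue("{prompt}\n{{x}}")  # no placeholder: append the text below the prompt
  } else {
    prompt                   # placeholder present: keep the template as-is
  }
}

prep_prompt("Classify this text")
#> Classify this text
#> {x}
prep_prompt("Classify the following: {{x}}. Answer briefly.")
#> [1] "Classify the following: {{x}}. Answer briefly."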
12 changes: 2 additions & 10 deletions R/m-backend-submit.R
@@ -32,21 +32,13 @@ m_backend_submit.mall_ollama <- function(backend, x, prompt) {
 }
 
 #' @export
-m_backend_submit.mall_simulate_llm <- function(backend, x, base_prompt) {
+m_backend_submit.mall_simulate_llm <- function(backend, x, prompt) {
   args <- backend
   class(args) <- "list"
   if (args$model == "pipe") {
-    out <- trimws(strsplit(x, "\\|")[[1]][[2]])
-  } else if (args$model == "prompt") {
-    out <- glue("{base_prompt}\n{x}")
+    out <- map_chr(x, \(x) trimws(strsplit(x, "\\|")[[1]][[2]]))
   } else if (args$model == "echo") {
     out <- x
-  } else {
-    out <- list(
-      x = x,
-      base_prompt = base_prompt,
-      backend = args
-    )
   }
   out
 }
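The "pipe" model previously split only the first element; with `map_chr()` it now returns one simulated answer per input, which is what the vectorized `l_vec_prompt()` expects. A quick sketch of the behavior (the input strings are made up):

library(purrr)
x <- c("is this positive: great TV | positive",
       "is this positive: slow laptop | negative")
map_chr(x, \(x) trimws(strsplit(x, "\\|")[[1]][[2]]))
#> [1] "positive" "negative"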
2 changes: 1 addition & 1 deletion R/mall.R
@@ -1,5 +1,5 @@
 #' @importFrom ollamar chat test_connection list_models
-#' @importFrom dplyr mutate tibble bind_cols
+#' @importFrom dplyr mutate tibble bind_cols pull
 #' @importFrom utils menu
 #' @importFrom jsonlite fromJSON
 #' @import rlang
4 changes: 2 additions & 2 deletions man/llm_use.Rd

(Generated documentation file; diff not rendered.)

44 changes: 44 additions & 0 deletions tests/testthat/_snaps/llm-classify.md
@@ -0,0 +1,44 @@
# Classify on Ollama works

Code
llm_classify(reviews, review, labels = c("appliance", "computer"))
Output
review
1 This has been the best TV I've ever used. Great screen, and sound.
2 I regret buying this laptop. It is too slow and the keyboard is too noisy
3 Not sure how to feel about my new washing machine. Great color, but hard to figure
.classify
1 appliance
2 computer
3 appliance

---

Code
llm_classify(reviews, review, pred_name = "new", labels = c("appliance",
"computer"))
Output
review
1 This has been the best TV I've ever used. Great screen, and sound.
2 I regret buying this laptop. It is too slow and the keyboard is too noisy
3 Not sure how to feel about my new washing machine. Great color, but hard to figure
new
1 appliance
2 computer
3 appliance

---

Code
llm_classify(reviews, review, pred_name = "new", labels = c("appliance",
"computer"), additional_prompt = "Consider all laptops as appliances.")
Output
review
1 This has been the best TV I've ever used. Great screen, and sound.
2 I regret buying this laptop. It is too slow and the keyboard is too noisy
3 Not sure how to feel about my new washing machine. Great color, but hard to figure
new
1 appliance
2 appliance
3 appliance

14 changes: 14 additions & 0 deletions tests/testthat/_snaps/llm-custom.md
@@ -0,0 +1,14 @@
# Custom on Ollama works

Code
llm_custom(reviews_table(), review, my_prompt)
Output
review
1 This has been the best TV I've ever used. Great screen, and sound.
2 I regret buying this laptop. It is too slow and the keyboard is too noisy
3 Not sure how to feel about my new washing machine. Great color, but hard to figure
.pred
1 Yes
2 No
3 No

14 changes: 14 additions & 0 deletions tests/testthat/_snaps/llm-extract.md
@@ -0,0 +1,14 @@
# Extract on Ollama works

Code
llm_extract(reviews_table(), review, "product")
Output
review
1 This has been the best TV I've ever used. Great screen, and sound.
2 I regret buying this laptop. It is too slow and the keyboard is too noisy
3 Not sure how to feel about my new washing machine. Great color, but hard to figure
.extract
1 tv
2 laptop
3 washing machine
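These snapshots imply tests that need a live Ollama model. A hedged sketch of how such a test might be structured — the connection guard and model name are assumptions, not the package's actual test helpers (only `reviews_table()` appears above):

library(testthat)
library(mall)

test_that("Extract on Ollama works", {
  # Hypothetical guard: skip when no local Ollama instance responds
  skip_if(inherits(try(ollamar::test_connection(), silent = TRUE), "try-error"))
  llm_use("ollama", "llama3.2", .silent = TRUE)  # model name assumed
  expect_snapshot(llm_extract(reviews_table(), review, "product"))
})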
