Skip to content

Commit

Permalink
first version
Browse files Browse the repository at this point in the history
  • Loading branch information
kumeS committed Aug 10, 2023
1 parent 062446c commit 0dd2307
Show file tree
Hide file tree
Showing 13 changed files with 117 additions and 93 deletions.
6 changes: 4 additions & 2 deletions R/TextSummary.R
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#' @param Model A character string specifying the AI model to be used for text summarization. Default is "gpt-3.5-turbo".
#' @param temperature Numeric specifying the randomness of the AI model's output.
#' @param language A character string specifying the language in which the summary should be generated. Default is "English".
#' @param verbose A logical flag to print the progress message. Default is TRUE.
#' @importFrom magrittr %>%
#' @importFrom purrr map
#' @importFrom clipr read_clip
Expand All @@ -31,7 +32,8 @@ TextSummary <- function(text = clipr::read_clip(),
Summary_block = 200,
Model = "gpt-3.5-turbo",
temperature = 1,
language = "English"){ # Default language changed to English
language = "English",
verbose = TRUE){ # Default language changed to English
# Asserting input types and values
assertthat::assert_that(assertthat::is.string(text[1]))
assertthat::assert_that(assertthat::is.count(nch), nch > 0)
Expand Down Expand Up @@ -78,7 +80,7 @@ TextSummary <- function(text = clipr::read_clip(),

# Execution
for(n in seq_len(length(pr))){
cat(n, "\n")
if(verbose){cat(n, "\n")}

retry_count <- 0
while (retry_count < 5) {
Expand Down
103 changes: 51 additions & 52 deletions R/conversation4R.R
Original file line number Diff line number Diff line change
@@ -1,102 +1,103 @@
#' Conversation for R
#' Conversation Interface for R with OpenAI
#'
#' This function manages a conversation with OpenAI's GPT model.
#' This function provides an interface to communicate with OpenAI's models using R. It maintains a conversation history and allows for initialization of a new conversation.
#'
#' @title Conversation for R
#' @description This function uses the OpenAI API to manage a conversation with the specified model.
#' @param message The message to send to the model. Must be a string.
#' @param api_key A string. Your OpenAI API key. Defaults to the value of the environment variable "OPENAI_API_KEY".
#' @param template1 The initial template for the conversation. Must be a string. Default is an empty string.
#' @param ConversationBufferWindowMemory_k The number of previous messages to keep in memory.
#' Must be a positive integer. Default is 2.
#' @param Model The model to use for the chat completion. Must be a string. Default is "gpt-3.5-turbo-16k".
#' @param initialization Whether to initialize the chat history. Must be a logical value. Default is FALSE.
#' @param output Whether to return the output. Must be a logical value. Default is FALSE.
#' @importFrom httr add_headers POST content
#' @importFrom jsonlite toJSON
#' @title Conversation Interface for R
#' @description Interface to communicate with OpenAI's models using R, maintaining a conversation history and allowing for initialization of a new conversation.
#' @param message A string containing the message to be sent to the model.
#' @param api_key A string containing the OpenAI API key. Default is retrieved from the system environment variable "OPENAI_API_KEY".
#' @param template A string containing the template for the conversation. Default is an empty string.
#' @param ConversationBufferWindowMemory_k An integer representing the conversation buffer window memory. Default is 2.
#' @param Model A string representing the model to be used. Default is "gpt-3.5-turbo-16k".
#' @param language A string representing the language to be used in the conversation. Default is "English".
#' @param initialization A logical flag to initialize a new conversation. Default is FALSE.
#' @param verbose A logical flag to print the conversation. Default is TRUE.
#' @importFrom assertthat assert_that is.string is.count is.flag
#' @return A string containing the conversation history.
#' @return Prints the conversation if verbose is TRUE. No return value.
#' @export conversation4R
#' @author Satoshi Kume
#' @examples
#' \dontrun{
#' message <- "Hello, how are you?"
#' api_key <- "your_api_key"
#' conversation4R(message, api_key = api_key)
#' conversation4R(message = "Hello, OpenAI!",
#' api_key = "your_api_key_here",
#' language = "English",
#' initialization = TRUE)
#' }

conversation4R <- function(message,
api_key = Sys.getenv("OPENAI_API_KEY"),
template1 = "",
template = "",
ConversationBufferWindowMemory_k = 2,
Model = "gpt-3.5-turbo-16k",
language = "English",
initialization = FALSE,
output = FALSE){
verbose = TRUE){

# Assertions to verify the types of the input parameters
assertthat::assert_that(assertthat::is.string(message))
assertthat::assert_that(assertthat::is.string(api_key))
assertthat::assert_that(assertthat::is.string(template1))
assertthat::assert_that(assertthat::is.string(template))
assertthat::assert_that(assertthat::is.count(ConversationBufferWindowMemory_k))
assertthat::assert_that(assertthat::is.string(Model))
assertthat::assert_that(assertthat::is.flag(initialization))
assertthat::assert_that(assertthat::is.flag(output))

# Initialization
if(!exists("chat_history")){
chat_history <- new.env()
chat_history$history <- c()
chat_history <- new.env()
chat_history$history <- c()
} else {
if(initialization){
chat_history <- new.env()
chat_history$history <- c()
}
}
if(initialization){
chat_history <- new.env()
chat_history$history <- c()
}}

# Define
temperature = 1

# Prompt Template
if(template1 == ""){
template1 = "You are an excellent assistant.\nPlease reply in English."
if(template == ""){
template = paste0("You are an excellent assistant. Please reply in ", language, ".")
}

template2 = "
History:%s"

template3 = "
Human: %s"
Human: %s"

template4 = "
Assistant: %s"
Assistant: %s"

if(identical(as.character(chat_history$history), character(0))){

res <- chatAI4R::chat4R(content=message,
api_key=api_key,
Model = Model,
temperature = temperature)
chat_historyR <- list(
list(role = "system", content = template),
list(role = "user", content = message))

# Run
res <- chatAI4R::chat4R_history(history = chat_historyR,
api_key = api_key,
Model = Model,
temperature = temperature)


template3s <- sprintf(template3, message)
template4s <- sprintf(template4, res)

chat_history$history <- list(
list(role = "system", content = template1),
list(role = "system", content = template),
list(role = "user", content = message),
list(role = "assistant", content = res)
)

out <- c(template1,
out <- c(paste0("System: ", template),
crayon::red(template3s),
crayon::blue(template4s))

if(output){
return(res)
}else{
return(cat(out))
if(verbose){
cat(out)
}

}
}else{

if(!identical(as.character(chat_history$history), character(0))){

Expand Down Expand Up @@ -130,19 +131,17 @@ rr <- c(rr, r)

template2s <- sprintf(template2, paste0(rr, collapse = ""))

out <- c(template1,
out <- c(paste0("System: ", template),
template2s,
crayon::red(sprintf(template3, new_conversation[[1]]$content)),
crayon::blue(sprintf(template4, assistant_conversation[[1]]$content)))

chat_history$history <<- chat_historyR
chat_history$history <- chat_historyR

if(output){
return(res)
}else{
return(cat(out))
if(verbose){
cat(out)
}

}
}

}
16 changes: 9 additions & 7 deletions R/createFunction4R.R
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
#' @param max_tokens An integer that specifies the maximum number of tokens to be returned by the AI model. Default is 250.
#' @param roxygen A logical that indicates whether to include roxygen comments in the generated function. Default is TRUE.
#' @param View A logical that indicates whether to view the intermediate steps. Default is TRUE.
#' @param verbose A logical flag to print the progress message. Default is TRUE.
#' @importFrom magrittr %>%
#' @importFrom crayon red
#' @importFrom assertthat assert_that is.string is.count noNA
Expand All @@ -29,7 +30,8 @@ createFunction4R <- function(Func_description,
max_tokens = 250,
View = TRUE,
roxygen = TRUE,
api_key = Sys.getenv("OPENAI_API_KEY")){
api_key = Sys.getenv("OPENAI_API_KEY"),
verbose = TRUE){

# Validate inputs
assertthat::assert_that(assertthat::is.string(Func_description))
Expand Down Expand Up @@ -57,7 +59,7 @@ Please write the R code for this function without any example usage and note.
template1s <- sprintf(template1, Func_description, packages)

# 01 Run function creation
cat(crayon::red("01 Run function creation \n"))
if(verbose){cat(crayon::red("01 Run function creation \n"))}
f <- completions4R(prompt = template1s,
api_key = api_key,
max_tokens = max_tokens,
Expand All @@ -78,7 +80,7 @@ Please itemize and suggest improvements to this function.
", f)

# 02 Propose improvements to the function
cat(crayon::red("02 Propose improvements to the function \n"))
if(verbose){cat(crayon::red("02 Propose improvements to the function \n"))}
f1 <- completions4R(prompt = template2,
api_key = api_key,
max_tokens = max_tokens,
Expand All @@ -103,7 +105,7 @@ R script: ", f)
template3s <- sprintf(template3, Func_description, packages)

# 03 Improve the function
cat(crayon::red("03 Improve the function \n"))
if(verbose){cat(crayon::red("03 Improve the function \n"))}
f2 <- completions4R(prompt = template3s,
api_key = api_key,
max_tokens = max_tokens,
Expand All @@ -129,7 +131,7 @@ Please write roxygen comments only for the following R code.
Function: ", f2)

# 04 Include roxygen comments
cat(crayon::red("04 Include roxygen comments \n"))
if(verbose){cat(crayon::red("04 Include roxygen comments \n"))}
f3 <- completions4R(prompt = template4,
api_key = api_key,
max_tokens = max_tokens,
Expand All @@ -150,14 +152,14 @@ if(View){
}

# View results
cat(crayon::red("Finished!!"))
if(verbose){cat(crayon::red("Finished!!"))}

return(f4)

}else{

# View results
cat(crayon::red("Finished!!"))
if(verbose){cat(crayon::red("Finished!!"))}

return(f2)
}
Expand Down
6 changes: 4 additions & 2 deletions R/img2img_StableDiffusion4R.R
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
#' Other possible values are 'stable-diffusion-512-v2-1', 'stable-diffusion-xl-beta-v2-2-2', 'stable-diffusion-768-v2-1'.
#' @param api_host A string. The host of the Stable Diffusion API. Default is 'https://api.stability.ai'.
#' @param api_key A string. The API key for the Stable Diffusion API. It is read from the 'DreamStudio_API_KEY' environment variable by default.
#' @param verbose A logical flag to print the progress message. Default is TRUE.
#' @importFrom assertthat assert_that is.string is.number is.count noNA
#' @importFrom httr add_headers POST http_status content
#' @importFrom jsonlite fromJSON
Expand Down Expand Up @@ -49,7 +50,8 @@ img2img_StableDiffusion4R <- function(
style_preset = "photographic",
engine_id = "stable-diffusion-v1-5",
api_host = "https://api.stability.ai",
api_key = Sys.getenv("DreamStudio_API_KEY")
api_key = Sys.getenv("DreamStudio_API_KEY"),
verbose = TRUE
) {

# Verify if text_prompts is not empty or NULL
Expand Down Expand Up @@ -141,7 +143,7 @@ if(is.null(sampler)){
result <- list()

for (i in seq_len(number_of_images)) {
cat("Generating", i, "image\n")
if(verbose){cat("Generating", i, "image\n")}

response <- httr::POST(uri,
body = payload,
Expand Down
6 changes: 4 additions & 2 deletions R/img2img_upscale_StableDiffusion4R.R
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
#' @param Original_Image A logical. If TRUE, the original image is included in the result. Default is TRUE.
#' @param Flop A logical. If TRUE, the up-scaled image is flopped. Default is TRUE.
#' @param Flip A logical. If FALSE, the up-scaled image is not flipped. Default is FALSE.
#' @param verbose A logical flag to print the progress message. Default is TRUE.
#' @importFrom assertthat assert_that is.string is.count noNA
#' @importFrom httr add_headers POST http_status content
#' @importFrom jsonlite fromJSON
Expand All @@ -33,7 +34,8 @@ img2img_upscale_StableDiffusion4R <- function(
Original_Image = TRUE,
Flop = TRUE,
Flip = FALSE,
api_key = Sys.getenv("DreamStudio_API_KEY")
api_key = Sys.getenv("DreamStudio_API_KEY"),
verbose = TRUE
) {
# Verify if init_image_path is not empty or NULL
if (is.null(init_image_path) || init_image_path == "") {
Expand Down Expand Up @@ -65,7 +67,7 @@ img2img_upscale_StableDiffusion4R <- function(

# Creating empty variable
result <- list()
cat("Generating an up-scaled image\n")
if(verbose){cat("Generating an up-scaled image\n")}

response <- httr::POST(uri,
body = payload,
Expand Down
6 changes: 4 additions & 2 deletions R/txt2img_StableDiffusion4R.R
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
#' Other possible values are 'stable-diffusion-v1-5', 'stable-diffusion-xl-beta-v2-2-2', 'stable-diffusion-768-v2-1'.
#' @param api_host A string. The host of the Stable Diffusion API. Default is 'https://api.stability.ai'.
#' @param api_key A string. The API key for the Stable Diffusion API. It is read from the 'DreamStudio_API_KEY' environment variable by default.
#' @param verbose A logical flag to print the progress message. Default is TRUE.
#' @importFrom assertthat assert_that is.string is.count noNA
#' @importFrom httr add_headers POST http_status content
#' @importFrom jsonlite fromJSON
Expand Down Expand Up @@ -48,7 +49,8 @@ txt2img_StableDiffusion4R <- function(
style_preset = "photographic",
engine_id = "stable-diffusion-512-v2-1",
api_host = "https://api.stability.ai",
api_key = Sys.getenv("DreamStudio_API_KEY")
api_key = Sys.getenv("DreamStudio_API_KEY"),
verbose = TRUE
) {

# Verify if text_prompts is not empty or NULL
Expand Down Expand Up @@ -139,7 +141,7 @@ payload <- list(

for (i in seq_len(number_of_images)) {
#i <- 1
cat("Generate", i, "image\n")
if(verbose){cat("Generate", i, "image\n")}

response <- httr::POST(uri,
body = payload,
Expand Down
3 changes: 0 additions & 3 deletions cran-comments.md
Original file line number Diff line number Diff line change
@@ -1,8 +1,5 @@
In response to the review comments, I have revised the package.

## R CMD check results

0 errors | 0 warnings | 1 note

* This is a new release.

5 changes: 4 additions & 1 deletion man/TextSummary.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading

0 comments on commit 0dd2307

Please sign in to comment.