This library provides unofficial Go clients for the OpenAI API. We support:
- ChatGPT
- GPT-3, GPT-4
- DALL·E 2
- Whisper
go get github.com/odannyc/go-openai
Currently, go-openai requires Go version 1.18 or greater.
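For a brand-new project, setup typically looks like this (the module path below is only a placeholder):

mkdir chatbot && cd chatbot
go mod init example.com/chatbot   # any module path works here
go get github.com/odannyc/go-openai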
ChatGPT example usage:

package main

import (
    "context"
    "fmt"

    openai "github.com/odannyc/go-openai"
)

func main() {
    client := openai.NewClient("your token")
    resp, err := client.CreateChatCompletion(
        context.Background(),
        openai.ChatCompletionRequest{
            Model: openai.GPT3Dot5Turbo,
            Messages: []openai.ChatCompletionMessage{
                {
                    Role:    openai.ChatMessageRoleUser,
                    Content: "Hello!",
                },
            },
        },
    )
    if err != nil {
        fmt.Printf("ChatCompletion error: %v\n", err)
        return
    }
    fmt.Println(resp.Choices[0].Message.Content)
}
To obtain an API key:
- Visit the OpenAI website at https://platform.openai.com/account/api-keys.
- If you don't have an account, click on "Sign Up" to create one. If you do, click "Log In".
- Once logged in, navigate to your API key management page.
- Click on "Create new secret key".
- Enter a name for your new key, then click "Create secret key".
- Your new API key will be displayed. Use this key to interact with the OpenAI API.
Note: Your API key is sensitive information. Do not share it with anyone.
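Rather than hardcoding the key, a common pattern is to read it from an environment variable; the variable name OPENAI_API_KEY below is a convention, not something the library requires:

// Read the key from the environment so it never ends up in source control.
apiKey := os.Getenv("OPENAI_API_KEY")
if apiKey == "" {
    panic("OPENAI_API_KEY is not set")
}
client := openai.NewClient(apiKey)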
ChatGPT streaming completion
package main

import (
    "context"
    "errors"
    "fmt"
    "io"

    openai "github.com/odannyc/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    req := openai.ChatCompletionRequest{
        Model:     openai.GPT3Dot5Turbo,
        MaxTokens: 20,
        Messages: []openai.ChatCompletionMessage{
            {
                Role:    openai.ChatMessageRoleUser,
                Content: "Lorem ipsum",
            },
        },
        Stream: true,
    }
    stream, err := c.CreateChatCompletionStream(ctx, req)
    if err != nil {
        fmt.Printf("ChatCompletionStream error: %v\n", err)
        return
    }
    defer stream.Close()

    fmt.Printf("Stream response: ")
    for {
        response, err := stream.Recv()
        if errors.Is(err, io.EOF) {
            fmt.Println("\nStream finished")
            return
        }
        if err != nil {
            fmt.Printf("\nStream error: %v\n", err)
            return
        }
        // Use Print, not Printf: the delta content is data, not a format string.
        fmt.Print(response.Choices[0].Delta.Content)
    }
}
GPT-3 completion
package main

import (
    "context"
    "fmt"

    openai "github.com/odannyc/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    req := openai.CompletionRequest{
        Model:     openai.GPT3Ada,
        MaxTokens: 5,
        Prompt:    "Lorem ipsum",
    }
    resp, err := c.CreateCompletion(ctx, req)
    if err != nil {
        fmt.Printf("Completion error: %v\n", err)
        return
    }
    fmt.Println(resp.Choices[0].Text)
}
GPT-3 streaming completion
package main

import (
    "context"
    "errors"
    "fmt"
    "io"

    openai "github.com/odannyc/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    req := openai.CompletionRequest{
        Model:     openai.GPT3Ada,
        MaxTokens: 5,
        Prompt:    "Lorem ipsum",
        Stream:    true,
    }
    stream, err := c.CreateCompletionStream(ctx, req)
    if err != nil {
        fmt.Printf("CompletionStream error: %v\n", err)
        return
    }
    defer stream.Close()

    for {
        response, err := stream.Recv()
        if errors.Is(err, io.EOF) {
            fmt.Println("Stream finished")
            return
        }
        if err != nil {
            fmt.Printf("Stream error: %v\n", err)
            return
        }
        fmt.Printf("Stream response: %v\n", response)
    }
}
Audio Speech-To-Text
package main

import (
    "context"
    "fmt"

    openai "github.com/odannyc/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    req := openai.AudioRequest{
        Model:    openai.Whisper1,
        FilePath: "recording.mp3",
    }
    resp, err := c.CreateTranscription(ctx, req)
    if err != nil {
        fmt.Printf("Transcription error: %v\n", err)
        return
    }
    fmt.Println(resp.Text)
}
Audio Captions
package main

import (
    "context"
    "fmt"
    "os"

    openai "github.com/odannyc/go-openai"
)

func main() {
    c := openai.NewClient(os.Getenv("OPENAI_KEY"))

    req := openai.AudioRequest{
        Model:    openai.Whisper1,
        FilePath: os.Args[1],
        Format:   openai.AudioResponseFormatSRT,
    }
    resp, err := c.CreateTranscription(context.Background(), req)
    if err != nil {
        fmt.Printf("Transcription error: %v\n", err)
        return
    }
    f, err := os.Create(os.Args[1] + ".srt")
    if err != nil {
        fmt.Printf("Could not create file: %v\n", err)
        return
    }
    defer f.Close()
    if _, err := f.WriteString(resp.Text); err != nil {
        fmt.Printf("Error writing to file: %v\n", err)
        return
    }
}
DALL·E 2 image generation
package main

import (
    "bytes"
    "context"
    "encoding/base64"
    "fmt"
    "image/png"
    "os"

    openai "github.com/odannyc/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    // Example: request the image as a URL.
    reqURL := openai.ImageRequest{
        Prompt:         "Parrot on a skateboard performs a trick, cartoon style, natural light, high detail",
        Size:           openai.CreateImageSize256x256,
        ResponseFormat: openai.CreateImageResponseFormatURL,
        N:              1,
    }
    respURL, err := c.CreateImage(ctx, reqURL)
    if err != nil {
        fmt.Printf("Image creation error: %v\n", err)
        return
    }
    fmt.Println(respURL.Data[0].URL)

    // Example: request the image as base64-encoded data.
    reqBase64 := openai.ImageRequest{
        Prompt:         "Portrait of a humanoid parrot in a classic costume, high detail, realistic light, unreal engine",
        Size:           openai.CreateImageSize256x256,
        ResponseFormat: openai.CreateImageResponseFormatB64JSON,
        N:              1,
    }
    respBase64, err := c.CreateImage(ctx, reqBase64)
    if err != nil {
        fmt.Printf("Image creation error: %v\n", err)
        return
    }
    imgBytes, err := base64.StdEncoding.DecodeString(respBase64.Data[0].B64JSON)
    if err != nil {
        fmt.Printf("Base64 decode error: %v\n", err)
        return
    }
    r := bytes.NewReader(imgBytes)
    imgData, err := png.Decode(r)
    if err != nil {
        fmt.Printf("PNG decode error: %v\n", err)
        return
    }
    file, err := os.Create("example.png")
    if err != nil {
        fmt.Printf("File creation error: %v\n", err)
        return
    }
    defer file.Close()
    if err := png.Encode(file, imgData); err != nil {
        fmt.Printf("PNG encode error: %v\n", err)
        return
    }
    fmt.Println("The image was saved as example.png")
}
Configuring proxy
config := openai.DefaultConfig("token")
proxyURL, err := url.Parse("http://localhost:{port}")
if err != nil {
    panic(err)
}
transport := &http.Transport{
    Proxy: http.ProxyURL(proxyURL),
}
config.HTTPClient = &http.Client{
    Transport: transport,
}
c := openai.NewClientWithConfig(config)
See also: https://pkg.go.dev/github.com/odannyc/go-openai#ClientConfig
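The same HTTPClient hook can be used for other transport-level settings. For example, a request timeout (a sketch using only the standard library net/http and time packages):

config := openai.DefaultConfig("token")
config.HTTPClient = &http.Client{
    // Fail fast instead of hanging indefinitely on a slow response.
    Timeout: 30 * time.Second,
}
c := openai.NewClientWithConfig(config)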
ChatGPT with conversation context
package main

import (
    "bufio"
    "context"
    "fmt"
    "os"
    "strings"

    "github.com/odannyc/go-openai"
)

func main() {
    client := openai.NewClient("your token")
    messages := make([]openai.ChatCompletionMessage, 0)
    reader := bufio.NewReader(os.Stdin)
    fmt.Println("Conversation")
    fmt.Println("---------------------")

    for {
        fmt.Print("-> ")
        text, _ := reader.ReadString('\n')
        // Strip the trailing newline before sending the text to the API.
        text = strings.Replace(text, "\n", "", -1)
        messages = append(messages, openai.ChatCompletionMessage{
            Role:    openai.ChatMessageRoleUser,
            Content: text,
        })

        resp, err := client.CreateChatCompletion(
            context.Background(),
            openai.ChatCompletionRequest{
                Model:    openai.GPT3Dot5Turbo,
                Messages: messages,
            },
        )
        if err != nil {
            fmt.Printf("ChatCompletion error: %v\n", err)
            continue
        }

        content := resp.Choices[0].Message.Content
        // Append the assistant's reply so the next turn keeps the full context.
        messages = append(messages, openai.ChatCompletionMessage{
            Role:    openai.ChatMessageRoleAssistant,
            Content: content,
        })
        fmt.Println(content)
    }
}
Azure OpenAI ChatGPT
package main

import (
    "context"
    "fmt"

    openai "github.com/odannyc/go-openai"
)

func main() {
    config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
    // If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function:
    // config.AzureModelMapperFunc = func(model string) string {
    //     azureModelMapping := map[string]string{
    //         "gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
    //     }
    //     return azureModelMapping[model]
    // }

    client := openai.NewClientWithConfig(config)
    resp, err := client.CreateChatCompletion(
        context.Background(),
        openai.ChatCompletionRequest{
            Model: openai.GPT3Dot5Turbo,
            Messages: []openai.ChatCompletionMessage{
                {
                    Role:    openai.ChatMessageRoleUser,
                    Content: "Hello Azure OpenAI!",
                },
            },
        },
    )
    if err != nil {
        fmt.Printf("ChatCompletion error: %v\n", err)
        return
    }
    fmt.Println(resp.Choices[0].Message.Content)
}
Azure OpenAI Embeddings
package main

import (
    "context"
    "fmt"

    openai "github.com/odannyc/go-openai"
)

func main() {
    config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
    config.APIVersion = "2023-05-15" // optional: update to the latest API version

    // If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function:
    // config.AzureModelMapperFunc = func(model string) string {
    //     azureModelMapping := map[string]string{
    //         "gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
    //     }
    //     return azureModelMapping[model]
    // }

    input := "Text to vectorize"
    client := openai.NewClientWithConfig(config)
    resp, err := client.CreateEmbeddings(
        context.Background(),
        openai.EmbeddingRequest{
            Input: []string{input},
            Model: openai.AdaEmbeddingV2,
        })
    if err != nil {
        fmt.Printf("CreateEmbeddings error: %v\n", err)
        return
    }

    vectors := resp.Data[0].Embedding // []float32 with 1536 dimensions
    fmt.Println(vectors[:10], "...", vectors[len(vectors)-10:])
}
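Embedding vectors are usually compared with cosine similarity. The helper below is not part of this library, just a minimal sketch (it assumes the standard math package is imported):

// cosineSimilarity returns a value in [-1, 1]; higher means more similar.
func cosineSimilarity(a, b []float32) float64 {
    var dot, normA, normB float64
    for i := range a {
        dot += float64(a[i]) * float64(b[i])
        normA += float64(a[i]) * float64(a[i])
        normB += float64(b[i]) * float64(b[i])
    }
    return dot / (math.Sqrt(normA) * math.Sqrt(normB))
}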
JSON Schema for function calling
Chat completions can now choose to call a function to retrieve more information (see the OpenAI developer docs). To describe the functions that can be called, a JSON schema must be provided. Many JSON schema libraries exist and are more advanced than what we can offer in this library; however, we have included a simple jsonschema package for those who want to use this feature without formatting their own JSON schema payload.
The developer documentation gives this JSON schema definition as an example:
{
    "name": "get_current_weather",
    "description": "Get the current weather in a given location",
    "parameters": {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA"
            },
            "unit": {
                "type": "string",
                "enum": ["celsius", "fahrenheit"]
            }
        },
        "required": ["location"]
    }
}
Using the jsonschema package, this schema could be created using structs as follows:
FunctionDefinition{
    Name: "get_current_weather",
    Parameters: jsonschema.Definition{
        Type: jsonschema.Object,
        Properties: map[string]jsonschema.Definition{
            "location": {
                Type:        jsonschema.String,
                Description: "The city and state, e.g. San Francisco, CA",
            },
            "unit": {
                Type: jsonschema.String,
                Enum: []string{"celsius", "fahrenheit"},
            },
        },
        Required: []string{"location"},
    },
}
The Parameters field of a FunctionDefinition can accept either of the above styles, or even a nested struct from another library (as long as it can be marshaled into JSON).
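To expose the function to the model, attach the definition to a chat request. A minimal sketch, assuming the definition above is stored in a variable weatherFunc (a name of our choosing) and that ChatCompletionRequest exposes a Functions field as in upstream go-openai:

resp, err := client.CreateChatCompletion(
    context.Background(),
    openai.ChatCompletionRequest{
        Model: openai.GPT3Dot5Turbo,
        Messages: []openai.ChatCompletionMessage{
            {Role: openai.ChatMessageRoleUser, Content: "What's the weather in Boston?"},
        },
        // weatherFunc is the FunctionDefinition built above.
        Functions: []openai.FunctionDefinition{weatherFunc},
    },
)
if err == nil && resp.Choices[0].Message.FunctionCall != nil {
    // The model's chosen arguments arrive as a JSON string for your code to parse.
    fmt.Println(resp.Choices[0].Message.FunctionCall.Arguments)
}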
Error handling
OpenAI maintains clear documentation on how to handle API errors. For example:
e := &openai.APIError{}
if errors.As(err, &e) {
    switch e.HTTPStatusCode {
    case 401:
        // invalid auth or key (do not retry)
    case 429:
        // rate limiting or engine overload (wait and retry)
    case 500:
        // openai server error (retry)
    default:
        // unhandled
    }
}
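Since 429 and 5xx responses are retryable while 401 is not, you can layer a simple backoff on top of this check. A sketch, not part of the library (the function names and retry policy here are illustrative):

// retryable reports whether an error is worth retrying, per the cases above.
func retryable(err error) bool {
    e := &openai.APIError{}
    if errors.As(err, &e) {
        return e.HTTPStatusCode == 429 || e.HTTPStatusCode >= 500
    }
    return false
}

// callWithRetry retries fn up to three times with linear backoff.
func callWithRetry(fn func() error) error {
    var err error
    for attempt := 1; attempt <= 3; attempt++ {
        if err = fn(); err == nil || !retryable(err) {
            return err
        }
        time.Sleep(time.Duration(attempt) * time.Second)
    }
    return err
}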
See the examples/ folder for more.
Integration tests are run against the production version of the OpenAI API. These tests verify that the library is properly coded against the actual behavior of the API, and they will fail upon any incompatible change in the API.
Note: these tests send real network traffic to the OpenAI API and may hit rate limits. Temporary network problems may also cause them to fail.
Run tests using:
OPENAI_TOKEN=XXX go test -v -tags=integration ./api_integration_test.go
If the OPENAI_TOKEN environment variable is not set, integration tests will be skipped.
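The skip behaves like a standard Go testing guard; the pattern looks roughly like this (illustrative, not the exact test source):

func TestAPI(t *testing.T) {
    token := os.Getenv("OPENAI_TOKEN")
    if token == "" {
        t.Skip("OPENAI_TOKEN not set, skipping integration tests")
    }
    // ... exercise the real API with openai.NewClient(token) ...
}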
We want to take a moment to express our deepest gratitude to the contributors and sponsors of this project.
To all of you: thank you. You've helped us achieve more than we ever imagined possible. Can't wait to see where we go next, together!