lint: fix linter warnings reported by golangci-lint (sashabaranov#522)
simonklee committed Nov 7, 2023
1 parent 9e0232f commit 0664105
Showing 23 changed files with 425 additions and 431 deletions.
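
Two mechanical fixes account for almost all of the changes below. The first replaces dot imports of the library under test with a regular import, something golangci-lint commonly reports (for example through stylecheck's ST1001 or revive's dot-imports rule; the commit does not show which linters the repository enables), so every identifier from the package now carries an explicit openai. qualifier. The second fix, renaming unused function parameters to the blank identifier _, is illustrated after audio_test.go below. A minimal sketch of the import fix, as a hypothetical standalone program rather than code from this commit:

package main

import (
	"fmt"

	"github.com/sashabaranov/go-openai" // package name is openai
)

func main() {
	// Under the old dot import (. "github.com/sashabaranov/go-openai"),
	// identifiers like ChatCompletionRequest and GPT3Dot5Turbo were bare;
	// after the fix each one carries an explicit openai. qualifier.
	req := openai.ChatCompletionRequest{
		MaxTokens: 5,
		Model:     openai.GPT3Dot5Turbo,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
		},
	}
	fmt.Println(req.Model)
}

Qualifying each identifier also keeps the tests exercising the library strictly through its public API, the same way an external caller would.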
api_integration_test.go (1 change: 0 additions & 1 deletion)
@@ -9,7 +9,6 @@ import (
     "os"
     "testing"
 
-    . "github.com/sashabaranov/go-openai"
     "github.com/sashabaranov/go-openai/internal/test/checks"
     "github.com/sashabaranov/go-openai/jsonschema"
 )
audio_api_test.go (14 changes: 7 additions & 7 deletions)
@@ -12,7 +12,7 @@ import (
     "strings"
     "testing"
 
-    . "github.com/sashabaranov/go-openai"
+    "github.com/sashabaranov/go-openai"
     "github.com/sashabaranov/go-openai/internal/test"
     "github.com/sashabaranov/go-openai/internal/test/checks"
 )
@@ -26,7 +26,7 @@ func TestAudio(t *testing.T) {
 
     testcases := []struct {
         name     string
-        createFn func(context.Context, AudioRequest) (AudioResponse, error)
+        createFn func(context.Context, openai.AudioRequest) (openai.AudioResponse, error)
     }{
         {
             "transcribe",
@@ -48,7 +48,7 @@ func TestAudio(t *testing.T) {
             path := filepath.Join(dir, "fake.mp3")
             test.CreateTestFile(t, path)
 
-            req := AudioRequest{
+            req := openai.AudioRequest{
                 FilePath: path,
                 Model:    "whisper-3",
             }
@@ -57,7 +57,7 @@ func TestAudio(t *testing.T) {
         })
 
         t.Run(tc.name+" (with reader)", func(t *testing.T) {
-            req := AudioRequest{
+            req := openai.AudioRequest{
                 FilePath: "fake.webm",
                 Reader:   bytes.NewBuffer([]byte(`some webm binary data`)),
                 Model:    "whisper-3",
@@ -76,7 +76,7 @@ func TestAudioWithOptionalArgs(t *testing.T) {
 
     testcases := []struct {
         name     string
-        createFn func(context.Context, AudioRequest) (AudioResponse, error)
+        createFn func(context.Context, openai.AudioRequest) (openai.AudioResponse, error)
     }{
         {
             "transcribe",
@@ -98,13 +98,13 @@ func TestAudioWithOptionalArgs(t *testing.T) {
             path := filepath.Join(dir, "fake.mp3")
             test.CreateTestFile(t, path)
 
-            req := AudioRequest{
+            req := openai.AudioRequest{
                 FilePath:    path,
                 Model:       "whisper-3",
                 Prompt:      "用简体中文",
                 Temperature: 0.5,
                 Language:    "zh",
-                Format:      AudioResponseFormatSRT,
+                Format:      openai.AudioResponseFormatSRT,
             }
             _, err := tc.createFn(ctx, req)
             checks.NoError(t, err, "audio API error")
audio_test.go (2 changes: 1 addition & 1 deletion)
@@ -40,7 +40,7 @@ func TestAudioWithFailingFormBuilder(t *testing.T) {
     }
 
     var failForField string
-    mockBuilder.mockWriteField = func(fieldname, value string) error {
+    mockBuilder.mockWriteField = func(fieldname, _ string) error {
         if fieldname == failForField {
             return mockFailedErr
         }
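
The rename of value to _ above is the second recurring fix: parameters that a callback must accept but never reads are renamed to the blank identifier, the idiom that unused-parameter checks (for example revive's unused-parameter rule; the enabled linter set is not shown in this commit) ask for. The same treatment is applied to the unused *http.Request parameters throughout chat_stream_test.go below. A minimal standalone sketch:

package main

import (
	"fmt"
	"net/http"
)

// health must match the http.HandlerFunc signature, but it never reads the
// request, so the parameter is named _ to mark the omission as deliberate.
func health(w http.ResponseWriter, _ *http.Request) {
	fmt.Fprintln(w, "ok")
}

func main() {
	http.HandleFunc("/healthz", health)
	fmt.Println(http.ListenAndServe(":8080", nil)) // blocks until the server exits
}

Dropping the parameter entirely is not an option: the callback type (here http.HandlerFunc, in the tests the form-builder and test-server handler signatures) fixes the arity, so the blank identifier satisfies the signature while documenting that the value is deliberately ignored.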
chat_stream_test.go (110 changes: 55 additions & 55 deletions)
@@ -10,36 +10,36 @@ import (
     "strconv"
     "testing"
 
-    . "github.com/sashabaranov/go-openai"
+    "github.com/sashabaranov/go-openai"
     "github.com/sashabaranov/go-openai/internal/test/checks"
 )
 
 func TestChatCompletionsStreamWrongModel(t *testing.T) {
-    config := DefaultConfig("whatever")
+    config := openai.DefaultConfig("whatever")
     config.BaseURL = "http://localhost/v1"
-    client := NewClientWithConfig(config)
+    client := openai.NewClientWithConfig(config)
     ctx := context.Background()
 
-    req := ChatCompletionRequest{
+    req := openai.ChatCompletionRequest{
         MaxTokens: 5,
         Model:     "ada",
-        Messages: []ChatCompletionMessage{
+        Messages: []openai.ChatCompletionMessage{
             {
-                Role:    ChatMessageRoleUser,
+                Role:    openai.ChatMessageRoleUser,
                 Content: "Hello!",
             },
         },
     }
     _, err := client.CreateChatCompletionStream(ctx, req)
-    if !errors.Is(err, ErrChatCompletionInvalidModel) {
+    if !errors.Is(err, openai.ErrChatCompletionInvalidModel) {
         t.Fatalf("CreateChatCompletion should return ErrChatCompletionInvalidModel, but returned: %v", err)
     }
 }
 
 func TestCreateChatCompletionStream(t *testing.T) {
     client, server, teardown := setupOpenAITestServer()
     defer teardown()
-    server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
+    server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
         w.Header().Set("Content-Type", "text/event-stream")
 
         // Send test responses
@@ -61,12 +61,12 @@ func TestCreateChatCompletionStream(t *testing.T) {
         checks.NoError(t, err, "Write error")
     })
 
-    stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+    stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
         MaxTokens: 5,
-        Model:     GPT3Dot5Turbo,
-        Messages: []ChatCompletionMessage{
+        Model:     openai.GPT3Dot5Turbo,
+        Messages: []openai.ChatCompletionMessage{
             {
-                Role:    ChatMessageRoleUser,
+                Role:    openai.ChatMessageRoleUser,
                 Content: "Hello!",
             },
         },
@@ -75,15 +75,15 @@ func TestCreateChatCompletionStream(t *testing.T) {
     checks.NoError(t, err, "CreateCompletionStream returned error")
     defer stream.Close()
 
-    expectedResponses := []ChatCompletionStreamResponse{
+    expectedResponses := []openai.ChatCompletionStreamResponse{
         {
             ID:      "1",
             Object:  "completion",
             Created: 1598069254,
-            Model:   GPT3Dot5Turbo,
-            Choices: []ChatCompletionStreamChoice{
+            Model:   openai.GPT3Dot5Turbo,
+            Choices: []openai.ChatCompletionStreamChoice{
                 {
-                    Delta: ChatCompletionStreamChoiceDelta{
+                    Delta: openai.ChatCompletionStreamChoiceDelta{
                         Content: "response1",
                     },
                     FinishReason: "max_tokens",
@@ -94,10 +94,10 @@ func TestCreateChatCompletionStream(t *testing.T) {
             ID:      "2",
             Object:  "completion",
             Created: 1598069255,
-            Model:   GPT3Dot5Turbo,
-            Choices: []ChatCompletionStreamChoice{
+            Model:   openai.GPT3Dot5Turbo,
+            Choices: []openai.ChatCompletionStreamChoice{
                 {
-                    Delta: ChatCompletionStreamChoiceDelta{
+                    Delta: openai.ChatCompletionStreamChoiceDelta{
                         Content: "response2",
                     },
                     FinishReason: "max_tokens",
@@ -133,7 +133,7 @@ func TestCreateChatCompletionStream(t *testing.T) {
 func TestCreateChatCompletionStreamError(t *testing.T) {
     client, server, teardown := setupOpenAITestServer()
     defer teardown()
-    server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
+    server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
         w.Header().Set("Content-Type", "text/event-stream")
 
         // Send test responses
@@ -156,12 +156,12 @@ func TestCreateChatCompletionStreamError(t *testing.T) {
         checks.NoError(t, err, "Write error")
     })
 
-    stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+    stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
         MaxTokens: 5,
-        Model:     GPT3Dot5Turbo,
-        Messages: []ChatCompletionMessage{
+        Model:     openai.GPT3Dot5Turbo,
+        Messages: []openai.ChatCompletionMessage{
             {
-                Role:    ChatMessageRoleUser,
+                Role:    openai.ChatMessageRoleUser,
                 Content: "Hello!",
             },
         },
@@ -173,7 +173,7 @@ func TestCreateChatCompletionStreamError(t *testing.T) {
     _, streamErr := stream.Recv()
     checks.HasError(t, streamErr, "stream.Recv() did not return error")
 
-    var apiErr *APIError
+    var apiErr *openai.APIError
     if !errors.As(streamErr, &apiErr) {
         t.Errorf("stream.Recv() did not return APIError")
     }
@@ -183,7 +183,7 @@ func TestCreateChatCompletionStreamWithHeaders(t *testing.T) {
 func TestCreateChatCompletionStreamWithHeaders(t *testing.T) {
     client, server, teardown := setupOpenAITestServer()
     defer teardown()
-    server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
+    server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
         w.Header().Set("Content-Type", "text/event-stream")
         w.Header().Set(xCustomHeader, xCustomHeaderValue)
 
@@ -196,12 +196,12 @@ func TestCreateChatCompletionStreamWithHeaders(t *testing.T) {
         checks.NoError(t, err, "Write error")
     })
 
-    stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+    stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
         MaxTokens: 5,
-        Model:     GPT3Dot5Turbo,
-        Messages: []ChatCompletionMessage{
+        Model:     openai.GPT3Dot5Turbo,
+        Messages: []openai.ChatCompletionMessage{
             {
-                Role:    ChatMessageRoleUser,
+                Role:    openai.ChatMessageRoleUser,
                 Content: "Hello!",
             },
         },
@@ -219,7 +219,7 @@ func TestCreateChatCompletionStreamWithRatelimitHeaders(t *testing.T) {
 func TestCreateChatCompletionStreamWithRatelimitHeaders(t *testing.T) {
     client, server, teardown := setupOpenAITestServer()
     defer teardown()
-    server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
+    server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
         w.Header().Set("Content-Type", "text/event-stream")
         for k, v := range rateLimitHeaders {
             switch val := v.(type) {
@@ -239,12 +239,12 @@ func TestCreateChatCompletionStreamWithRatelimitHeaders(t *testing.T) {
         checks.NoError(t, err, "Write error")
     })
 
-    stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+    stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
         MaxTokens: 5,
-        Model:     GPT3Dot5Turbo,
-        Messages: []ChatCompletionMessage{
+        Model:     openai.GPT3Dot5Turbo,
+        Messages: []openai.ChatCompletionMessage{
             {
-                Role:    ChatMessageRoleUser,
+                Role:    openai.ChatMessageRoleUser,
                 Content: "Hello!",
             },
         },
@@ -264,7 +264,7 @@ func TestCreateChatCompletionStreamErrorWithDataPrefix(t *testing.T) {
 func TestCreateChatCompletionStreamErrorWithDataPrefix(t *testing.T) {
     client, server, teardown := setupOpenAITestServer()
     defer teardown()
-    server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
+    server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
         w.Header().Set("Content-Type", "text/event-stream")
 
         // Send test responses
@@ -276,12 +276,12 @@ func TestCreateChatCompletionStreamErrorWithDataPrefix(t *testing.T) {
         checks.NoError(t, err, "Write error")
     })
 
-    stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+    stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
         MaxTokens: 5,
-        Model:     GPT3Dot5Turbo,
-        Messages: []ChatCompletionMessage{
+        Model:     openai.GPT3Dot5Turbo,
+        Messages: []openai.ChatCompletionMessage{
             {
-                Role:    ChatMessageRoleUser,
+                Role:    openai.ChatMessageRoleUser,
                 Content: "Hello!",
             },
         },
@@ -293,7 +293,7 @@ func TestCreateChatCompletionStreamErrorWithDataPrefix(t *testing.T) {
     _, streamErr := stream.Recv()
     checks.HasError(t, streamErr, "stream.Recv() did not return error")
 
-    var apiErr *APIError
+    var apiErr *openai.APIError
     if !errors.As(streamErr, &apiErr) {
         t.Errorf("stream.Recv() did not return APIError")
     }
@@ -303,7 +303,7 @@ func TestCreateChatCompletionStreamRateLimitError(t *testing.T) {
 func TestCreateChatCompletionStreamRateLimitError(t *testing.T) {
     client, server, teardown := setupOpenAITestServer()
     defer teardown()
-    server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
+    server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
         w.Header().Set("Content-Type", "application/json")
         w.WriteHeader(429)
 
@@ -317,18 +317,18 @@ func TestCreateChatCompletionStreamRateLimitError(t *testing.T) {
         _, err := w.Write(dataBytes)
         checks.NoError(t, err, "Write error")
     })
-    _, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+    _, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
         MaxTokens: 5,
-        Model:     GPT3Dot5Turbo,
-        Messages: []ChatCompletionMessage{
+        Model:     openai.GPT3Dot5Turbo,
+        Messages: []openai.ChatCompletionMessage{
             {
-                Role:    ChatMessageRoleUser,
+                Role:    openai.ChatMessageRoleUser,
                 Content: "Hello!",
             },
         },
         Stream: true,
     })
-    var apiErr *APIError
+    var apiErr *openai.APIError
     if !errors.As(err, &apiErr) {
         t.Errorf("TestCreateChatCompletionStreamRateLimitError did not return APIError")
     }
@@ -345,7 +345,7 @@ func TestAzureCreateChatCompletionStreamRateLimitError(t *testing.T) {
     client, server, teardown := setupAzureTestServer()
     defer teardown()
     server.RegisterHandler("/openai/deployments/gpt-35-turbo/chat/completions",
-        func(w http.ResponseWriter, r *http.Request) {
+        func(w http.ResponseWriter, _ *http.Request) {
             w.Header().Set("Content-Type", "application/json")
             w.WriteHeader(http.StatusTooManyRequests)
             // Send test responses
@@ -355,13 +355,13 @@ func TestAzureCreateChatCompletionStreamRateLimitError(t *testing.T) {
             checks.NoError(t, err, "Write error")
         })
 
-    apiErr := &APIError{}
-    _, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+    apiErr := &openai.APIError{}
+    _, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
         MaxTokens: 5,
-        Model:     GPT3Dot5Turbo,
-        Messages: []ChatCompletionMessage{
+        Model:     openai.GPT3Dot5Turbo,
+        Messages: []openai.ChatCompletionMessage{
             {
-                Role:    ChatMessageRoleUser,
+                Role:    openai.ChatMessageRoleUser,
                 Content: "Hello!",
             },
         },
@@ -387,7 +387,7 @@ func TestAzureCreateChatCompletionStreamRateLimitError(t *testing.T) {
 }
 
 // Helper funcs.
-func compareChatResponses(r1, r2 ChatCompletionStreamResponse) bool {
+func compareChatResponses(r1, r2 openai.ChatCompletionStreamResponse) bool {
     if r1.ID != r2.ID || r1.Object != r2.Object || r1.Created != r2.Created || r1.Model != r2.Model {
         return false
     }
@@ -402,7 +402,7 @@ func compareChatResponses(r1, r2 ChatCompletionStreamResponse) bool {
     return true
 }
 
-func compareChatStreamResponseChoices(c1, c2 ChatCompletionStreamChoice) bool {
+func compareChatStreamResponseChoices(c1, c2 openai.ChatCompletionStreamChoice) bool {
     if c1.Index != c2.Index {
         return false
     }
[Diffs for the remaining 19 changed files were not loaded on this page.]
