Skip to content

Commit

Permalink
refactor: value classes
Browse files Browse the repository at this point in the history
  • Loading branch information
aallam committed Jun 19, 2021
1 parent 1b6a313 commit c18fa6b
Show file tree
Hide file tree
Showing 13 changed files with 152 additions and 176 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -53,31 +53,31 @@ internal class OpenAIApi(
request: SearchRequest
): List<SearchResult> {
return httpClient.post<SearchResponse>(
path = "/v1/engines/$engineId/search",
path = "$EnginesPath/$engineId/search",
body = request
) {
contentType(ContentType.Application.Json)
}.data
}

override suspend fun engines(): List<Engine> {
return httpClient.get<EnginesResponse>(path = "/v1/engines").data
return httpClient.get<EnginesResponse>(path = EnginesPath).data
}

override suspend fun engine(engineId: EngineId): Engine {
return httpClient.get(path = "/v1/engines/$engineId")
return httpClient.get(path = "$EnginesPath/$engineId")
}

override suspend fun completion(engineId: EngineId, request: CompletionRequest?): TextCompletion {
return httpClient.post(path = "/v1/engines/$engineId/completions", body = request ?: EmptyContent) {
return httpClient.post(path = "$EnginesPath/$engineId/completions", body = request ?: EmptyContent) {
contentType(ContentType.Application.Json)
}
}

override fun completions(engineId: EngineId, request: CompletionRequest?): Flow<TextCompletion> {
return flow {
httpClient.post<HttpStatement>(
path = "/v1/engines/$engineId/completions",
path = "$EnginesPath/$engineId/completions",
body = request.toStreamRequest()
) {
contentType(ContentType.Application.Json)
Expand All @@ -86,8 +86,8 @@ internal class OpenAIApi(
while (!readChannel.isClosedForRead) {
val line = readChannel.readUTF8Line() ?: ""
val value: TextCompletion = when {
line.startsWith(STREAM_END_TOKEN) -> break
line.startsWith(STREAM_PREFIX) -> JsonLenient.decodeFromString(line.removePrefix(STREAM_PREFIX))
line.startsWith(StreamEndToken) -> break
line.startsWith(StreamPrefix) -> JsonLenient.decodeFromString(line.removePrefix(StreamPrefix))
else -> continue
}
emit(value)
Expand All @@ -98,14 +98,14 @@ internal class OpenAIApi(

@ExperimentalOpenAI
override suspend fun classifications(request: ClassificationRequest): Classification {
return httpClient.post(path = "/v1/classifications", body = request) {
return httpClient.post(path = ClassificationsPath, body = request) {
contentType(ContentType.Application.Json)
}
}

@ExperimentalOpenAI
override suspend fun answers(request: AnswerRequest): Answer {
return httpClient.post(path = "/v1/answers", body = request) {
return httpClient.post(path = AnswersPath, body = request) {
contentType(ContentType.Application.Json)
}
}
Expand All @@ -116,28 +116,35 @@ internal class OpenAIApi(
appendFile(fileSystem, "file", request.file)
append("purpose", request.purpose.raw)
}
return httpClient.submitFormWithBinaryData(url = "/v1/files", formData = data)
return httpClient.submitFormWithBinaryData(url = FilesPath, formData = data)
}

override suspend fun files(): List<File> {
return httpClient.get<FileResponse>(path = "/v1/files").data
return httpClient.get<FileResponse>(path = FilesPath).data
}

override suspend fun file(fileId: FileId): File? {
return try {
httpClient.get(path = "/v1/files/$fileId")
httpClient.get(path = "$FilesPath/$fileId")
} catch (exception: ClientRequestException) {
if (exception.response.status == HttpStatusCode.NotFound) return null
throw exception
}
}

override suspend fun delete(fileId: FileId) {
return httpClient.delete(path = "/v1/files/$fileId")
return httpClient.delete(path = "$FilesPath/$fileId")
}

companion object {
private const val STREAM_PREFIX = "data:"
private const val STREAM_END_TOKEN = "$STREAM_PREFIX [DONE]"
// API Paths
private const val EnginesPath = "/v1/engines"
private const val ClassificationsPath = "/v1/classifications"
private const val AnswersPath = "/v1/answers"
private const val FilesPath = "/v1/files"

// Stream Tokens
private const val StreamPrefix = "data:"
private const val StreamEndToken = "$StreamPrefix [DONE]"
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,8 @@ internal class LabeledExampleSerializer : KSerializer<LabeledExample> {
override val descriptor: SerialDescriptor = serializer.descriptor

override fun deserialize(decoder: Decoder): LabeledExample {
val strings = serializer.deserialize(decoder)
return LabeledExample(example = strings[0], label = strings[1])
val (example, label) = serializer.deserialize(decoder)
return LabeledExample(example = example, label = label)
}

override fun serialize(encoder: Encoder, value: LabeledExample) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,75 +2,58 @@ package com.aallam.openai.api.engine

import com.aallam.openai.api.engine.internal.EngineIdSerializer
import kotlinx.serialization.Serializable
import kotlin.jvm.JvmInline

/**
* OpenAI’s API engine ID.
*/
@Serializable(EngineIdSerializer::class)
public sealed class EngineId(public val id: String) {

/**
* Ada is usually the fastest model and can perform tasks like parsing text, address correction
* and certain kinds of classification tasks that don’t require too much nuance. Ada’s performance
* can often be improved by providing more context.
*
* Good at: **Parsing text, simple classification, address correction, keywords**
*
* *Note: Any task performed by a faster model like Ada can be performed by a more powerful model like Curie or Davinci.*
*/
public object Ada : EngineId("ada")

/**
* Babbage can perform straightforward tasks like simple classification. It’s also quite capable
* when it comes to Semantic Search ranking how well documents match up with search queries.
*
* Good at: **Moderate classification, semantic search classification**
*/
public object Babbage : EngineId("babbage")

/**
* Curie is extremely powerful, yet very fast. While Davinci is stronger when it comes to
* analyzing complicated text, Curie is quite capable for many nuanced tasks like sentiment
* classification and summarization. Curie is also quite good at answering questions and
* performing Q\&A and as a general service chatbot.
*
* Good at: **Language translation, complex classification, text sentiment, summarization**
*/
public object Curie : EngineId("curie")

/**
* Davinci is the most capable engine and can perform any task the other models can perform and
* often with less instruction. For applications requiring a lot of understanding of the content,
* like summarization for a specific audience and content creative generation, Davinci is going to
* produce the best results. The trade-off with Davinci is that it costs more to use per API call
* and other engines are faster.
*
* Another area where Davinci shines is in understanding the intent of text.
* Davinci is quite good at solving many kinds of logic problems and explaining the motives of
* characters. Davinci has been able to solve some of the most challenging AI problems involving
* cause and effect.
*
* Good at: **Complex intent, cause and effect, summarization for audience**
*/
public object Davinci : EngineId("davinci")
@JvmInline
public value class EngineId(public val id: String) {
override fun toString(): String = id
}

/**
* Can be used to provide an engine id manually.
*/
public class Custom(id: String) : EngineId(id)
/**
* Ada is usually the fastest model and can perform tasks like parsing text, address correction
* and certain kinds of classification tasks that don’t require too much nuance. Ada’s performance
* can often be improved by providing more context.
*
* Good at: **Parsing text, simple classification, address correction, keywords**
*
* *Note: Any task performed by a faster model like Ada can be performed by a more powerful model like Curie or Davinci.*
*/
public val Ada: EngineId = EngineId("ada")

override fun toString(): String {
return id
}
/**
* Babbage can perform straightforward tasks like simple classification. It’s also quite capable
* when it comes to Semantic Search ranking how well documents match up with search queries.
*
* Good at: **Moderate classification, semantic search classification**
*/
public val Babbage: EngineId = EngineId("babbage")

override fun equals(other: Any?): Boolean {
if (this === other) return true
if (other !is EngineId) return false
if (id != other.id) return false
return true
}
/**
* Curie is extremely powerful, yet very fast. While Davinci is stronger when it comes to
* analyzing complicated text, Curie is quite capable for many nuanced tasks like sentiment
* classification and summarization. Curie is also quite good at answering questions and
* performing Q\&A and as a general service chatbot.
*
* Good at: **Language translation, complex classification, text sentiment, summarization**
*/
public val Curie: EngineId = EngineId("curie")

override fun hashCode(): Int {
return id.hashCode()
}
}
/**
* Davinci is the most capable engine and can perform any task the other models can perform and
* often with less instruction. For applications requiring a lot of understanding of the content,
* like summarization for a specific audience and content creative generation, Davinci is going to
* produce the best results. The trade-off with Davinci is that it costs more to use per API call
* and other engines are faster.
*
* Another area where Davinci shines is in understanding the intent of text.
* Davinci is quite good at solving many kinds of logic problems and explaining the motives of
* characters. Davinci has been able to solve some of the most challenging AI problems involving
* cause and effect.
*
* Good at: **Complex intent, cause and effect, summarization for audience**
*/
public val Davinci: EngineId = EngineId("davinci")
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
package com.aallam.openai.api.engine.internal

import com.aallam.openai.api.engine.EngineId
import com.aallam.openai.api.engine.*
import kotlinx.serialization.KSerializer
import kotlinx.serialization.builtins.serializer
import kotlinx.serialization.descriptors.SerialDescriptor
Expand All @@ -16,11 +16,11 @@ internal class EngineIdSerializer : KSerializer<EngineId> {

override fun deserialize(decoder: Decoder): EngineId {
return when (val value = decoder.decodeString()) {
EngineId.Ada.id -> EngineId.Ada
EngineId.Babbage.id -> EngineId.Babbage
EngineId.Curie.id -> EngineId.Curie
EngineId.Davinci.id -> EngineId.Davinci
else -> EngineId.Custom(value)
Ada.id -> Ada
Babbage.id -> Babbage
Curie.id -> Curie
Davinci.id -> Davinci
else -> EngineId(value)
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,11 @@ public data class FileRequest(
/**
* JSON Lines file to be uploaded.
*
     * If the [purpose] is set to [Search] or [Answers], each line is a JSON record with a "text" field and an optional
     * "metadata" field. Only the "text" field will be used for search. Specifically, when the purpose is "answers", "\n"
     * is used as a delimiter to chunk contents in the "text" field into multiple documents for finer-grained matching.
     *
*
* If the [purpose] is set to "classifications", each line is a JSON record with a single training example with "text"
* If the [purpose] is set to [Classifications], each line is a JSON record with a single training example with "text"
* and "label" fields along with an optional "metadata" field.
*/
public val file: String,
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
package com.aallam.openai.api.file

import kotlinx.serialization.SerialName
import kotlinx.serialization.Serializable

@Serializable
Expand All @@ -8,5 +9,5 @@ public data class FileResponse(
/**
* List of files that belong to the user's organization.
*/
val data: List<File>
@SerialName("data") val data: List<File>
)
Original file line number Diff line number Diff line change
Expand Up @@ -2,25 +2,21 @@ package com.aallam.openai.api.file

import com.aallam.openai.api.file.internal.FileStatusSerializer
import kotlinx.serialization.Serializable
import kotlin.jvm.JvmInline

/**
* File status.
*/
@Serializable(FileStatusSerializer::class)
public sealed class FileStatus(public val raw: String) {
@JvmInline
public value class FileStatus(public val raw: String)

/**
* File status: uploaded.
*/
public object Uploaded : FileStatus("uploaded")

/**
* File status: processed
*/
public object Processed : FileStatus("processed")
/**
* File status: uploaded.
*/
public val Uploaded: FileStatus = FileStatus("uploaded")

/**
* Other file status.
*/
public class Custom(status: String) : FileStatus(status)
}
/**
* File status: processed
*/
public val Processed: FileStatus = FileStatus("processed")
Original file line number Diff line number Diff line change
Expand Up @@ -2,42 +2,23 @@ package com.aallam.openai.api.file

import com.aallam.openai.api.file.internal.PurposeSerializer
import kotlinx.serialization.Serializable
import kotlin.jvm.JvmInline

@Serializable(PurposeSerializer::class)
public sealed class Purpose(public val raw: String) {

/**
* File for Searches.
*/
public object Search : Purpose("search")

/**
* File for Answers.
*/
public object Answers : Purpose("answers")

/**
* File for classifications.
*/
public object Classifications : Purpose("classifications")

/**
* Provide a custom purpose.
*/
public class Custom(raw: String) : Purpose(raw)

override fun toString(): String {
return raw
}

override fun equals(other: Any?): Boolean {
if (this === other) return true
if (other !is Purpose) return false
if (raw != other.raw) return false
return true
}

override fun hashCode(): Int {
return raw.hashCode()
}
}
@JvmInline
public value class Purpose(public val raw: String)

/**
* File for Searches.
*/
public val Search: Purpose = Purpose("search")

/**
* File for Answers.
*/
public val Answers: Purpose = Purpose("answers")

/**
* File for classifications.
*/
public val Classifications: Purpose = Purpose("classifications")
Loading

0 comments on commit c18fa6b

Please sign in to comment.