
Commit bcf808a

feat: docs + dependency updates (#494)
* full feature flag
* github action full feature flag
* fix docs warnings
* expanded types for docs.rs
* types docs
* updated README
* add docs on scope
* add CONTRIBUTING.md
* 🎉
* updated readm
* doc comment
* updated lib.rs for docs.rs
* updated doc tests in lib.rs
* cleanup
* update feature flags
* borrow-instead-of-move example
* updated example
* References in README
* Using References
* simplify cargo.toml
* dependency version to allow patch or minor updates
* allow minor or patch updates in example dependencies
* add language to code blocks in README for syntax colors
* unittests: add missing feature flags in all mod tests {}
* fix feature flags required for tests
* update embedding tests to use latest small model
* fix ser_de test
* fix vector_store_files tests
* fix assistants examples to include required beta header
* fix example responses-images-and-vision
* fix responses-structured-outputs example
* fix for video-types
1 parent 1ea4351 commit bcf808a

73 files changed: +473 -244 lines changed


.github/workflows/pr-checks.yml

Lines changed: 1 addition & 0 deletions
@@ -77,6 +77,7 @@ jobs:
   administration-types,
   completion-types,
   types,
+  full,
 ]

 steps:

CONTRIBUTING.md

Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
+## Contributing to async-openai
+
+Thank you for taking the time to contribute and improve the project. I'd be happy to have you!
+
+All forms of contributions, such as new features requests, bug fixes, issues, documentation, testing, comments, [examples](https://github.com/64bit/async-openai/tree/main/examples) etc. are welcome.
+
+A good starting point would be to look at existing [open issues](https://github.com/64bit/async-openai/issues).
+
+To maintain quality of the project, a minimum of the following is a must for code contribution:
+
+- **Names & Documentation**: All struct names, field names and doc comments are from OpenAPI spec. Nested objects in spec without names leaves room for making appropriate name.
+- **Tested**: For changes supporting test(s) and/or example is required. Existing examples, doc tests, unit tests, and integration tests should be made to work with the changes if applicable.
+- **Scope**: Keep scope limited to APIs available in official documents such as [API Reference](https://platform.openai.com/docs/api-reference) or [OpenAPI spec](https://github.com/openai/openai-openapi/). Other LLMs or AI Providers offer OpenAI-compatible APIs, yet they may not always have full parity - for those use `byot` feature.
+- **Consistency**: Keep code style consistent across all the "APIs" that library exposes; it creates a great developer experience.
+
+This project adheres to [Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct)
+
+Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in async-openai by you, shall be licensed as MIT, without any additional terms or conditions.

async-openai/Cargo.toml

Lines changed: 54 additions & 27 deletions
@@ -13,15 +13,15 @@ homepage = "https://github.com/64bit/async-openai"
 repository = "https://github.com/64bit/async-openai"

 [features]
-default = []
+default = ["rustls"]
 # Enable rustls for TLS support
-rustls = ["_api", "dep:reqwest", "reqwest/rustls-tls-native-roots"]
+rustls = ["dep:reqwest", "reqwest/rustls-tls-native-roots"]
 # Enable rustls and webpki-roots
-rustls-webpki-roots = ["_api", "dep:reqwest", "reqwest/rustls-tls-webpki-roots"]
+rustls-webpki-roots = ["dep:reqwest", "reqwest/rustls-tls-webpki-roots"]
 # Enable native-tls for TLS support
-native-tls = ["_api","dep:reqwest", "reqwest/native-tls"]
+native-tls = ["dep:reqwest", "reqwest/native-tls"]
 # Remove dependency on OpenSSL
-native-tls-vendored = ["_api", "dep:reqwest", "reqwest/native-tls-vendored"]
+native-tls-vendored = ["dep:reqwest", "reqwest/native-tls-vendored"]
 # Bring your own types
 byot = ["dep:async-openai-macros"]

@@ -101,6 +101,33 @@ types = [
     "completion-types",
 ]

+# Enable all features
+full = [
+    "responses",
+    "webhook",
+    "audio",
+    "video",
+    "image",
+    "embedding",
+    "evals",
+    "finetuning",
+    "batch",
+    "file",
+    "upload",
+    "model",
+    "moderation",
+    "vectorstore",
+    "chatkit",
+    "container",
+    "realtime",
+    "chat-completion",
+    "assistant",
+    "administration",
+    "completions",
+    "types",
+    "byot",
+]
+
 # Internal feature to enable API dependencies
 _api = [
     "dep:async-openai-macros",
@@ -125,47 +152,47 @@ _api = [

 [dependencies]
 # Core dependencies - always needed for types
-serde = { version = "1.0.217", features = ["derive", "rc"] }
-serde_json = "1.0.135"
-derive_builder = { version = "0.20.2", optional = true }
-bytes = { version = "1.9.0", optional = true }
+serde = { version = "1", features = ["derive", "rc"] }
+serde_json = "1"
+derive_builder = { version = "0.20", optional = true }
+bytes = { version = "1.11", optional = true }

 # API dependencies - only needed when API features are enabled
 # We use a feature gate to enable these when any API feature is enabled
 async-openai-macros = { path = "../async-openai-macros", version = "0.1.0", optional = true }
 backoff = { version = "0.4.0", features = ["tokio"], optional = true }
-base64 = { version = "0.22.1", optional = true }
-futures = { version = "0.3.31", optional = true }
-rand = { version = "0.9.0", optional = true }
-reqwest = { version = "0.12.12", features = [
+base64 = { version = "0.22", optional = true }
+futures = { version = "0.3", optional = true }
+rand = { version = "0.9", optional = true }
+reqwest = { version = "0.12", features = [
     "json",
     "stream",
     "multipart",
 ], default-features = false, optional = true }
 reqwest-eventsource = { version = "0.6.0", optional = true }
-thiserror = { version = "2.0.11", optional = true }
-tokio = { version = "1.43.0", features = ["fs", "macros"], optional = true }
-tokio-stream = { version = "0.1.17", optional = true }
-tokio-util = { version = "0.7.13", features = ["codec", "io-util"], optional = true }
-tracing = { version = "0.1.41", optional = true }
-secrecy = { version = "0.10.3", features = ["serde"], optional = true }
-eventsource-stream = { version = "0.2.3", optional = true }
-serde_urlencoded = { version = "0.7.1", optional = true }
+thiserror = { version = "2", optional = true }
+tokio = { version = "1", features = ["fs", "macros"], optional = true }
+tokio-stream = { version = "0.1", optional = true }
+tokio-util = { version = "0.7", features = ["codec", "io-util"], optional = true }
+tracing = { version = "0.1", optional = true }
+secrecy = { version = "0.10", features = ["serde"], optional = true }
+eventsource-stream = { version = "0.2", optional = true }
+serde_urlencoded = { version = "0.7", optional = true }
 url = { version = "2.5", optional = true }
 # For Realtime websocket
-tokio-tungstenite = { version = "0.26.1", optional = true, default-features = false }
+tokio-tungstenite = { version = "0.28", optional = true, default-features = false }
 # For Webhook signature verification
 hmac = { version = "0.12", optional = true, default-features = false}
 sha2 = { version = "0.10", optional = true, default-features = false }
 hex = { version = "0.4", optional = true, default-features = false }

 [dev-dependencies]
-tokio-test = "0.4.4"
-serde_json = "1.0"
+tokio-test = "0.4"
+serde_json = "1"

 [[test]]
 name = "bring_your_own_type"
-required-features = ["byot", "file", "assistant", "model", "moderation", "image", "chat-completion", "completions", "audio", "embedding", "finetuning", "batch", "administration", "upload", "vectorstore", "responses", "chatkit", "container", "evals", "video"]
+required-features = ["full"]

 [[test]]
 name = "boxed_future"
@@ -177,15 +204,15 @@ required-features = ["chat-completion-types"]

 [[test]]
 name = "embeddings"
-required-features = ["embedding-types", "chat-completion-types"]
+required-features = ["embedding-types"]

 [[test]]
 name = "ser_de"
 required-features = ["chat-completion-types"]

 [[test]]
 name = "whisper"
-required-features = ["audio", "file-types"]
+required-features = ["audio"]

 [package.metadata.docs.rs]
 all-features = true

async-openai/README.md

Lines changed: 45 additions & 21 deletions
@@ -110,7 +110,7 @@ async fn main() -> Result<(), Box<dyn Error>> {

 ## Webhooks

-Support for webhook event types, signature verification, and building webhook events from payloads can be enabled by using the `webhook` feature flag.
+Support for webhook includes event types, signature verification, and building webhook events from payloads.

 ## Bring Your Own Types

@@ -150,26 +150,62 @@ This can be useful in many scenarios:
 Visit [examples/bring-your-own-type](https://github.com/64bit/async-openai/tree/main/examples/bring-your-own-type)
 directory to learn more.

+### References: Borrow Instead of Move
+
+With `byot` use reference to request types
+
+```rust
+let response: Response = client
+    .responses()
+    .create_byot(&request).await?
+```
+
+Visit [examples/borrow-instead-of-move](https://github.com/64bit/async-openai/tree/main/examples/borrow-instead-of-move) to learn more.
+
+
 ## Rust Types

-To only use Rust types from the crate - use feature flag `types`.
+To only use Rust types from the crate - disable default features and use feature flag `types`.

 There are granular feature flags like `response-types`, `chat-completion-types`, etc.

+These granular types are enabled when the corresponding API feature is enabled - for example `response` will enable `response-types`.
+
+## Configurable Requests
+
+### Individual Request
+Certain individual APIs that need additional query or header parameters - these can be provided by chaining `.query()`, `.header()`, `.headers()` on the API group.
+
+For example:
+```rust
+client.
+    .chat()
+    // query can be a struct or a map too.
+    .query(&[("limit", "10")])?
+    // header for demo
+    .header("key", "value")?
+    .list()
+    .await?
+```
+
+### All Requests
+
+Use `Config`, `OpenAIConfig` etc. for configuring url, headers or query parameters globally for all requests.
+
 ## OpenAI-compatible Providers

-### Configurable Request
+Even though the scope of the crate is official OpenAI APIs, it is very configurable to work with compatible providers.
+
+### Configurable Path

-To change path, query or headers of individual request use the `.path()`, `.query()`, `.header()`, `.headers()` method on the API group.
+In addition to `.query()`, `.header()`, `.headers()` a path for individual request can be changed by using `.path()`, method on the API group.

 For example:

-```
+```rust
 client
     .chat()
     .path("/v1/messages")?
-    .query(&[("role", "user")])?
-    .header("key", "value")?
     .create(request)
     .await?
 ```
@@ -200,22 +236,10 @@ fn chat_completion(client: &Client<Box<dyn Config>>) {

 ## Contributing

-Thank you for taking the time to contribute and improve the project. I'd be happy to have you!
-
-All forms of contributions, such as new features requests, bug fixes, issues, documentation, testing, comments, [examples](https://github.com/64bit/async-openai/tree/main/examples) etc. are welcome.
-
-A good starting point would be to look at existing [open issues](https://github.com/64bit/async-openai/issues).
-
-To maintain quality of the project, a minimum of the following is a must for code contribution:
-
-- **Names & Documentation**: All struct names, field names and doc comments are from OpenAPI spec. Nested objects in spec without names leaves room for making appropriate name.
-- **Tested**: For changes supporting test(s) and/or example is required. Existing examples, doc tests, unit tests, and integration tests should be made to work with the changes if applicable.
-- **Scope**: Keep scope limited to APIs available in official documents such as [API Reference](https://platform.openai.com/docs/api-reference) or [OpenAPI spec](https://github.com/openai/openai-openapi/). Other LLMs or AI Providers offer OpenAI-compatible APIs, yet they may not always have full parity - for those use `byot` feature.
-- **Consistency**: Keep code style consistent across all the "APIs" that library exposes; it creates a great developer experience.
+🎉 Thank you for taking the time to contribute and improve the project. I'd be happy to have you!

-This project adheres to [Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct)
+Please see [contributing guide!](https://github.com/64bit/async-openai/blob/main/CONTRIBUTING.md)

-Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in async-openai by you, shall be licensed as MIT, without any additional terms or conditions.

 ## Complimentary Crates
 - [async-openai-wasm](https://github.com/ifsheldon/async-openai-wasm) provides WASM support.
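A note on the "References: Borrow Instead of Move" snippet added above: it is a fragment. Below is a minimal self-contained sketch of the same pattern, assuming the `byot` and `responses` features are enabled, an `OPENAI_API_KEY` in the environment, and `serde_json::Value` for both request and response (the `_byot` methods are generic over serde types; the request body fields here are illustrative, not taken from the crate's docs).

```rust
use async_openai::Client;
use serde_json::{json, Value};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Reads OPENAI_API_KEY from the environment.
    let client = Client::new();

    // Illustrative request body; any Serialize type works with byot methods.
    let request = json!({
        "model": "gpt-4o-mini",
        "input": "Say hello"
    });

    // Passing `&request` borrows instead of moving, so the same request
    // value can be sent again without cloning or rebuilding it.
    let first: Value = client.responses().create_byot(&request).await?;
    let second: Value = client.responses().create_byot(&request).await?;

    println!("{first}\n{second}");
    Ok(())
}
```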

async-openai/src/assistants/threads.rs

Lines changed: 2 additions & 2 deletions
@@ -24,12 +24,12 @@ impl<'c, C: Config> Threads<'c, C> {
         }
     }

-    /// Call [Messages] group API to manage message in [thread_id] thread.
+    /// Call [Messages] group API to manage message in `thread_id` thread.
     pub fn messages(&self, thread_id: &str) -> Messages<'_, C> {
         Messages::new(self.client, thread_id)
     }

-    /// Call [Runs] group API to manage runs in [thread_id] thread.
+    /// Call [Runs] group API to manage runs in `thread_id` thread.
     pub fn runs(&self, thread_id: &str) -> Runs<'_, C> {
         Runs::new(self.client, thread_id)
     }

async-openai/src/embedding.rs

Lines changed: 8 additions & 11 deletions
@@ -72,7 +72,7 @@ impl<'c, C: Config> Embeddings<'c, C> {
     }
 }

-#[cfg(test)]
+#[cfg(all(test, feature = "embedding"))]
 mod tests {
     use crate::error::OpenAIError;
     use crate::types::embeddings::{CreateEmbeddingResponse, Embedding, EncodingFormat};
@@ -83,7 +83,7 @@ mod tests {
         let client = Client::new();

         let request = CreateEmbeddingRequestArgs::default()
-            .model("text-embedding-ada-002")
+            .model("text-embedding-3-small")
             .input("The food was delicious and the waiter...")
             .build()
             .unwrap();
@@ -98,7 +98,7 @@ mod tests {
         let client = Client::new();

         let request = CreateEmbeddingRequestArgs::default()
-            .model("text-embedding-ada-002")
+            .model("text-embedding-3-small")
             .input(["The food was delicious", "The waiter was good"])
             .build()
             .unwrap();
@@ -113,7 +113,7 @@ mod tests {
         let client = Client::new();

         let request = CreateEmbeddingRequestArgs::default()
-            .model("text-embedding-ada-002")
+            .model("text-embedding-3-small")
             .input([1, 2, 3])
             .build()
             .unwrap();
@@ -128,7 +128,7 @@ mod tests {
         let client = Client::new();

         let request = CreateEmbeddingRequestArgs::default()
-            .model("text-embedding-ada-002")
+            .model("text-embedding-3-small")
             .input([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
             .build()
             .unwrap();
@@ -143,7 +143,7 @@ mod tests {
         let client = Client::new();

         let request = CreateEmbeddingRequestArgs::default()
-            .model("text-embedding-ada-002")
+            .model("text-embedding-3-small")
             .input([vec![1, 2, 3], vec![4, 5, 6, 7], vec![7, 8, 10, 11, 100257]])
             .build()
             .unwrap();
@@ -178,7 +178,7 @@
     async fn test_cannot_use_base64_encoding_with_normal_create_request() {
         let client = Client::new();

-        const MODEL: &str = "text-embedding-ada-002";
+        const MODEL: &str = "text-embedding-3-small";
         const INPUT: &str = "You shall not pass.";

         let b64_request = CreateEmbeddingRequestArgs::default()
@@ -195,7 +195,7 @@
     async fn test_embedding_create_base64() {
         let client = Client::new();

-        const MODEL: &str = "text-embedding-ada-002";
+        const MODEL: &str = "text-embedding-3-small";
         const INPUT: &str = "a head full of dreams";

         let b64_request = CreateEmbeddingRequestArgs::default()
@@ -221,8 +221,5 @@
         let embedding = response.data.into_iter().next().unwrap().embedding;

         assert_eq!(b64_embedding.len(), embedding.len());
-        for (b64, normal) in b64_embedding.iter().zip(embedding.iter()) {
-            assert!((b64 - normal).abs() < 1e-6);
-        }
     }
 }
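For orientation, the request the updated tests build, written as a standalone program: a minimal sketch assuming the `embedding` feature is enabled, an `OPENAI_API_KEY` in the environment, the usual `client.embeddings().create(..)` accessor, and that `CreateEmbeddingRequestArgs` is importable from `async_openai::types::embeddings` (the tests above import sibling types from `crate::types::embeddings`).

```rust
use async_openai::{types::embeddings::CreateEmbeddingRequestArgs, Client};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Reads OPENAI_API_KEY from the environment.
    let client = Client::new();

    let request = CreateEmbeddingRequestArgs::default()
        .model("text-embedding-3-small")
        .input("The food was delicious and the waiter...")
        .build()?;

    let response = client.embeddings().create(request).await?;

    // One embedding per input; print the vector's dimensionality.
    println!("vector length: {}", response.data[0].embedding.len());
    Ok(())
}
```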

async-openai/src/file.rs

Lines changed: 1 addition & 1 deletion
@@ -76,7 +76,7 @@ impl<'c, C: Config> Files<'c, C> {
     }
 }

-#[cfg(test)]
+#[cfg(all(test, feature = "file"))]
 mod tests {
     use crate::{
         traits::RequestOptionsBuilder,
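The `#[cfg(all(test, ...))]` change above is the pattern the commit message calls "unittests: add missing feature flags in all mod tests {}": each unit test module compiles only when both `cfg(test)` and its API feature are active, so `cargo test` with a partial feature set skips the module instead of failing to compile. A minimal illustration of the gate (the test name below is made up for this sketch):

```rust
#[cfg(all(test, feature = "file"))]
mod tests {
    // Imports that only exist under the `file` feature would go here.

    #[tokio::test]
    async fn runs_only_when_the_file_feature_is_enabled() {
        // Body elided; the point is the cfg gate on the module above,
        // which keeps this code out of builds without the feature.
    }
}
```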
