Skip to content

Commit

Permalink
Merge pull request #1782 from SciPhi-AI/feature/centralized-limit-calc-and-chunk-limits
Browse files Browse the repository at this point in the history

Feature/centralized limit calc and chunk limits
  • Loading branch information
emrgnt-cmplxty authored Jan 8, 2025
2 parents 1eee23c + 465b155 commit eb2fa2a
Show file tree
Hide file tree
Showing 11 changed files with 1,112 additions and 372 deletions.
24 changes: 2 additions & 22 deletions js/sdk/src/v3/clients/users.ts
Original file line number Diff line number Diff line change
Expand Up @@ -323,6 +323,7 @@ export class UsersClient {
name?: string;
bio?: string;
profilePicture?: string;
metadata?: Record<string, string | null>;
}): Promise<WrappedUserResponse> {
const data = {
...(options.email && { email: options.email }),
Expand All @@ -332,6 +333,7 @@ export class UsersClient {
...(options.profilePicture && {
profile_picture: options.profilePicture,
}),
...(options.metadata && { metadata: options.metadata }),
};

return this.client.makeRequest("POST", `users/${options.id}`, {
Expand Down Expand Up @@ -515,26 +517,4 @@ export class UsersClient {
return this.client.makeRequest("GET", `users/${options.id}/limits`);
}


/**
 * **Patch metadata** for a user using a Stripe-like partial-update approach.
 *
 * The `metadata` object is sent as the request body and merged server-side
 * with the user's existing metadata:
 * - `metadata[key] = "some value"` => sets or updates the key
 * - `metadata[key] = ""` => removes the key
 *   (NOTE(review): the parameter type also permits `null` values — confirm
 *   whether the server treats `null` the same as `""` for key removal)
 * - empty `{}` => removes all metadata keys
 *
 * @param options.id The ID of the user whose metadata is patched
 * @param options.metadata Partial metadata updates to merge
 * @returns A promise resolving to the updated user (WrappedUserResponse)
 */
@feature("users.patchMetadata")
async patchMetadata(options: {
  id: string;
  metadata: Record<string, string | null>;
}): Promise<WrappedUserResponse> {
  // PATCH (not POST) so only the supplied keys are touched; the metadata
  // record itself is the request payload.
  return this.client.makeRequest("PATCH", `users/${options.id}/metadata`, {
    data: options.metadata,
  });
}
}
2 changes: 1 addition & 1 deletion llms.txt
Original file line number Diff line number Diff line change
Expand Up @@ -11001,7 +11001,7 @@ Below is a sample `r2r.toml` with essential configurations:
```toml
[app]
default_max_documents_per_user = 100
default_max_chunks_per_user = 100000
default_max_chunks_per_user = 10000
default_max_collections_per_user = 10

[agent]
Expand Down
2 changes: 1 addition & 1 deletion py/core/configs/r2r_azure.toml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ batch_size = 256

[embedding]
provider = "litellm"
base_model = "openai/text-embedding-3-small" # continue with `openai` for embeddings, due to server rate limit on azure
base_model = "azure/text-embedding-3-small"
base_dimension = 512

[file]
Expand Down
Loading

0 comments on commit eb2fa2a

Please sign in to comment.