Advanced Features
This document introduces the advanced features of Lovrabet SDK, including multi-project support, performance optimization, error handling, and extension capabilities.
Before starting, please make sure you have completed SDK Configuration; we recommend generating the configuration automatically with the CLI.
The following examples use the alias method client.models.users for readability, but you can also use the standard method client.models.dataset_xxx (functionally identical).
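For example, assuming a client created as described in SDK Configuration (a minimal sketch; dataset_xxx stands for your actual generated dataset code):
// Both calls target the same dataset; the alias is simply easier to read.
const byAlias = await client.models.users.filter({ status: "active" });
const byDatasetCode = await client.models.dataset_xxx.filter({ status: "active" });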
Multi-Project Support
The SDK fully supports accessing multiple project APIs within the same application.
Configuration Registration Method (Recommended)
import { registerModels, createClient } from "@lovrabet/sdk";
// Register multiple project configurations
registerModels(
{
appCode: "project-a",
models: [
{ datasetCode: "dataset-a-001", tableName: "users_a", alias: "users" },
{ datasetCode: "dataset-a-002", tableName: "orders_a", alias: "orders" },
],
},
"project-a" // Configuration name
);
registerModels(
{
appCode: "project-b",
models: [
{ datasetCode: "dataset-b-001", tableName: "users_b", alias: "users" },
{ datasetCode: "dataset-b-002", tableName: "products_b", alias: "products" },
],
},
"project-b"
);
// Create clients for different projects
const projectA = createClient("project-a");
const projectB = createClient("project-b");
// Completely isolated, no interference
const usersFromA = await projectA.models.users.filter();
const usersFromB = await projectB.models.users.filter();
Multi-Project Manager
class MultiProjectManager {
private clients = new Map<string, any>();
private configs = new Map<string, any>();
addProject(name: string, config: any) {
this.configs.set(name, config);
registerModels(config, name);
this.clients.set(name, createClient(name));
}
getClient(projectName: string) {
const client = this.clients.get(projectName);
if (!client) {
throw new Error(`Project ${projectName} not found`);
}
return client;
}
listProjects() {
return Array.from(this.clients.keys());
}
// Cross-project data aggregation
async aggregateData(projectNames: string[], modelName: string, params?: any) {
const results = await Promise.all(
projectNames.map(async (projectName) => {
const client = this.getClient(projectName);
const data = await client.models[modelName].filter(params);
return {
project: projectName,
data: data.tableData,
total: data.total,
};
})
);
return {
projects: results,
totalRecords: results.reduce((sum, result) => sum + result.total, 0),
};
}
}
// Use multi-project manager
const projectManager = new MultiProjectManager();
// Add projects
projectManager.addProject("ecommerce", {
appCode: "ecommerce-app",
models: [
{ datasetCode: "ecommerce-orders", tableName: "orders", alias: "orders" },
{ datasetCode: "ecommerce-customers", tableName: "customers", alias: "customers" },
],
});
projectManager.addProject("analytics", {
appCode: "analytics-app",
models: [
{ datasetCode: "analytics-events", tableName: "events", alias: "events" },
{ datasetCode: "analytics-reports", tableName: "reports", alias: "reports" },
],
});
// Get data from specific project
const ecommerceClient = projectManager.getClient("ecommerce");
const orders = await ecommerceClient.models.orders.filter();
// Cross-project data aggregation
// Note: every project you aggregate must register a model under the same alias
// ("users" here), as in the project-a / project-b example above
const aggregatedData = await projectManager.aggregateData(
["ecommerce", "analytics"],
"users",
{ status: "active" }
);
Performance Optimization
1. Configuration Registration Optimization
// ✅ Recommended: pre-register the configuration once
registerModels(LARGE_CONFIG);
const client1 = createClient(); // No parameters, best performance
const client2 = createClient(); // Reuses the registered configuration
// ❌ Not recommended: pass the configuration every time
const clientA = createClient(LARGE_CONFIG); // The configuration is processed on every call
const clientB = createClient(LARGE_CONFIG); // Duplicate passing
2. Request Caching System
interface CacheOptions {
ttl?: number; // Cache time (milliseconds)
maxSize?: number; // Maximum cache items
keyGenerator?: (params: any) => string; // Custom key generator
}
class RequestCache {
private cache = new Map<
string,
{ data: any; timestamp: number; ttl: number }
>();
private maxSize: number;
constructor(private options: CacheOptions = {}) {
this.maxSize = options.maxSize || 1000;
}
private generateKey(modelName: string, method: string, params: any): string {
if (this.options.keyGenerator) {
return this.options.keyGenerator({ modelName, method, params });
}
return `${modelName}:${method}:${JSON.stringify(params)}`;
}
get<T>(modelName: string, method: string, params: any): T | null {
const key = this.generateKey(modelName, method, params);
const item = this.cache.get(key);
if (!item) return null;
// Check if expired
if (Date.now() - item.timestamp > item.ttl) {
this.cache.delete(key);
return null;
}
return item.data;
}
set(modelName: string, method: string, params: any, data: any, ttl?: number) {
const key = this.generateKey(modelName, method, params);
// When the cache is full, evict expired/oldest entries first
if (this.cache.size >= this.maxSize) {
this.cleanup();
}
this.cache.set(key, {
data,
timestamp: Date.now(),
ttl: ttl || this.options.ttl || 5 * 60 * 1000, // Default 5 minutes
});
}
private cleanup() {
const now = Date.now();
const entries = Array.from(this.cache.entries());
// Delete expired items
for (const [key, item] of entries) {
if (now - item.timestamp > item.ttl) {
this.cache.delete(key);
}
}
// If still too many, delete oldest items
if (this.cache.size >= this.maxSize) {
const sortedEntries = entries
.filter(([key]) => this.cache.has(key))
.sort(([, a], [, b]) => a.timestamp - b.timestamp);
const toDelete = sortedEntries.slice(0, Math.floor(this.maxSize * 0.2));
toDelete.forEach(([key]) => this.cache.delete(key));
}
}
clear() {
this.cache.clear();
}
size() {
return this.cache.size;
}
}
// Cached client wrapper
class CachedClient {
private cache = new RequestCache({ ttl: 10 * 60 * 1000 }); // 10-minute cache
constructor(private client: any) {}
async cachedRequest<T>(
modelName: string,
method: string,
params: any = {},
options: { useCache?: boolean; cacheTtl?: number } = {}
): Promise<T> {
const { useCache = true, cacheTtl } = options;
// Try to get from cache
if (useCache && method === "getList") {
const cached = this.cache.get<T>(modelName, method, params);
if (cached) {
console.log(`Cache hit: ${modelName}.${method}`);
return cached;
}
}
// Get from API
const result = await this.client.models[modelName][method](params);
// Store in cache (only cache query operations)
if (useCache && method === "getList") {
this.cache.set(modelName, method, params, result, cacheTtl);
}
// If it's a modification operation, invalidate related cache
if (["create", "update", "delete"].includes(method)) {
this.invalidateCache(modelName);
}
return result;
}
private invalidateCache(modelName: string) {
// Simple implementation: clear all cache for this model
this.cache.clear(); // Or implement more precise cache invalidation logic
}
// Convenience methods
async getList<T>(modelName: string, params?: any, options?: any): Promise<T> {
return this.cachedRequest(modelName, "getList", params, options);
}
async create<T>(modelName: string, data: any): Promise<T> {
return this.cachedRequest(modelName, "create", data, { useCache: false });
}
async update<T>(modelName: string, id: string, data: any): Promise<T> {
return this.cachedRequest(
modelName,
"update",
{ id, data },
{ useCache: false }
);
}
}
// Use cached client
const cachedClient = new CachedClient(client);
// The first request fetches from the API
const users1 = await cachedClient.getList("users", { status: "active" });
// An identical second request is served from the cache
const users2 = await cachedClient.getList("users", { status: "active" });
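The invalidateCache above simply clears the whole cache. A more precise variant (a sketch assuming the modelName:method:params key format used by RequestCache, with a hypothetical deleteByPrefix helper added to it) drops only the affected model's entries:
// Hypothetical addition to RequestCache: remove all entries whose key starts with a prefix.
deleteByPrefix(prefix: string) {
  for (const key of Array.from(this.cache.keys())) {
    if (key.startsWith(prefix)) this.cache.delete(key);
  }
}
// CachedClient.invalidateCache can then invalidate a single model:
private invalidateCache(modelName: string) {
  this.cache.deleteByPrefix(`${modelName}:`);
}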
3. Batch Request Optimization
class BatchRequestManager {
constructor(private client: any) {}
async batchGetList<T>(
requests: Array<{ modelName: string; params?: any }>
): Promise<T[]> {
// Execute batch requests
const promises = requests.map(({ modelName, params }) =>
this.client.models[modelName].filter(params)
);
return Promise.all(promises);
}
async batchCreate<T>(modelName: string, items: any[]): Promise<T[]> {
// Process in batches to avoid single request being too large
const batchSize = 10;
const results: T[] = [];
for (let i = 0; i < items.length; i += batchSize) {
const batch = items.slice(i, i + batchSize);
const batchPromises = batch.map((item) =>
this.client.models[modelName].create(item)
);
const batchResults = await Promise.all(batchPromises);
results.push(...batchResults);
// Avoid requests being too frequent
if (i + batchSize < items.length) {
await new Promise((resolve) => setTimeout(resolve, 100));
}
}
return results;
}
// Smart batch update
async smartBatchUpdate<T>(
modelName: string,
updates: Array<{ id: string; data: any }>
): Promise<T[]> {
// Group by similarity of update data
const groups = this.groupSimilarUpdates(updates);
const results: T[] = [];
for (const group of groups) {
const groupPromises = group.map(({ id, data }) =>
this.client.models[modelName].update(id, data)
);
const groupResults = await Promise.all(groupPromises);
results.push(...groupResults);
// Delay between groups
await new Promise((resolve) => setTimeout(resolve, 50));
}
return results;
}
private groupSimilarUpdates(
updates: Array<{ id: string; data: any }>
): Array<Array<{ id: string; data: any }>> {
// Simple implementation: group by update fields
const groups = new Map<string, Array<{ id: string; data: any }>>();
updates.forEach((update) => {
const key = Object.keys(update.data).sort().join(",");
if (!groups.has(key)) {
groups.set(key, []);
}
groups.get(key)!.push(update);
});
return Array.from(groups.values());
}
}
// Use batch manager
const batchManager = new BatchRequestManager(client);
// Batch fetch data from different models
const [users, orders, products] = await batchManager.batchGetList([
{ modelName: "users", params: { status: "active" } },
{ modelName: "orders", params: { status: "pending" } },
{ modelName: "products", params: { inStock: true } },
]);
// Batch create
const newUsers = await batchManager.batchCreate("users", [
{ name: "User 1", email: "user1@example.com" },
{ name: "User 2", email: "user2@example.com" },
// ... more users
]);
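If you also want calls that arrive close together in time to be coalesced automatically, a small time-window scheduler can queue them and flush after a short delay. A minimal sketch, assuming the same client.models[modelName].filter() interface used above:
// Collects getList calls made within `batchDelay` ms and fires them together.
class CoalescingGetList {
  private pending: Array<{
    modelName: string;
    params?: any;
    resolve: (value: any) => void;
    reject: (reason?: any) => void;
  }> = [];
  private timer: ReturnType<typeof setTimeout> | null = null;
  constructor(private client: any, private batchDelay = 50) {}
  getList(modelName: string, params?: any): Promise<any> {
    return new Promise((resolve, reject) => {
      this.pending.push({ modelName, params, resolve, reject });
      if (!this.timer) {
        this.timer = setTimeout(() => this.flush(), this.batchDelay);
      }
    });
  }
  private flush() {
    const batch = this.pending;
    this.pending = [];
    this.timer = null;
    // Fire the collected requests concurrently; each caller settles individually.
    batch.forEach((item) =>
      this.client.models[item.modelName].filter(item.params).then(item.resolve, item.reject)
    );
  }
}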
4. Request Deduplication
class RequestDeduplicator {
private pendingRequests = new Map<string, Promise<any>>();
constructor(private client: any) {}
private generateKey(modelName: string, method: string, params: any): string {
return `${modelName}:${method}:${JSON.stringify(params)}`;
}
async request<T>(
modelName: string,
method: string,
params?: any
): Promise<T> {
const key = this.generateKey(modelName, method, params);
// If the same request is in progress, return that Promise directly
if (this.pendingRequests.has(key)) {
console.log(`Request deduplication: ${key}`);
return this.pendingRequests.get(key);
}
// Create new request
const promise = this.client.models[modelName][method](params).finally(() => {
// Clear cache after request completes
this.pendingRequests.delete(key);
});
this.pendingRequests.set(key, promise);
return promise;
}
}
const deduplicator = new RequestDeduplicator(client);
// Multiple calls with same parameters will be merged
const [result1, result2, result3] = await Promise.all([
deduplicator.request("users", "getList", { status: "active" }),
deduplicator.request("users", "getList", { status: "active" }),
deduplicator.request("users", "getList", { status: "active" }),
]);
// Only one request will actually be made, all three results are the same
Advanced Error Handling
Error Retry Mechanism
interface RetryOptions {
maxRetries: number;
baseDelay: number;
maxDelay: number;
backoffFactor: number;
retryCondition?: (error: any) => boolean;
}
class RetryableClient {
constructor(private client: any, private retryOptions: RetryOptions) {}
async withRetry<T>(operation: () => Promise<T>): Promise<T> {
let lastError: any;
let delay = this.retryOptions.baseDelay;
for (
let attempt = 1;
attempt <= this.retryOptions.maxRetries + 1;
attempt++
) {
try {
return await operation();
} catch (error: any) {
lastError = error;
// Check if should retry
if (
attempt > this.retryOptions.maxRetries ||
!this.shouldRetry(error)
) {
break;
}
console.log(
`Request failed, retrying after ${delay}ms (${attempt}/${this.retryOptions.maxRetries})`
);
// Wait then retry
await new Promise((resolve) => setTimeout(resolve, delay));
// Exponential backoff
delay = Math.min(
delay * this.retryOptions.backoffFactor,
this.retryOptions.maxDelay
);
}
}
throw lastError;
}
private shouldRetry(error: any): boolean {
// Custom retry condition
if (this.retryOptions.retryCondition) {
return this.retryOptions.retryCondition(error);
}
// Default retry condition
const retryableStatuses = [408, 429, 502, 503, 504];
return (
retryableStatuses.includes(error.status) || error.code === "NETWORK_ERROR"
);
}
// Wrap original methods
async getList<T>(modelName: string, params?: any): Promise<T> {
return this.withRetry(() => this.client.models[modelName].filter(params));
}
async create<T>(modelName: string, data: any): Promise<T> {
return this.withRetry(() => this.client.models[modelName].create(data));
}
}
// Use retryable client
const retryableClient = new RetryableClient(client, {
maxRetries: 3,
baseDelay: 1000,
maxDelay: 10000,
backoffFactor: 2,
retryCondition: (error) => error.status >= 500 || error.status === 429,
});
// Requests are retried automatically on failure
const users = await retryableClient.getList("users", { status: "active" });
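One refinement worth considering: plain exponential backoff can make many clients retry at the same moments. Adding random jitter spreads the retries out. A sketch of a helper that could replace the fixed delay calculation inside withRetry (not part of the SDK):
// Full jitter: a random wait between 0 and the capped exponential delay for the attempt (1-based).
function backoffWithJitter(attempt: number, baseDelay: number, maxDelay: number, factor = 2): number {
  const exponential = Math.min(baseDelay * Math.pow(factor, attempt - 1), maxDelay);
  return Math.random() * exponential;
}
// e.g. await new Promise((r) => setTimeout(r, backoffWithJitter(attempt, 1000, 10000)));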
Circuit Breaker Pattern
interface CircuitBreakerOptions {
failureThreshold: number; // Failure count threshold
recoveryTimeout: number; // Recovery attempt timeout
monitoringPeriod: number; // Monitoring period
}
enum CircuitState {
CLOSED = "CLOSED", // Normal state
OPEN = "OPEN", // Open state
HALF_OPEN = "HALF_OPEN", // Half-open state
}
class CircuitBreaker {
private state = CircuitState.CLOSED;
private failures = 0;
private lastFailureTime = 0;
private nextAttempt = 0;
constructor(private options: CircuitBreakerOptions) {}
async execute<T>(operation: () => Promise<T>): Promise<T> {
if (this.state === CircuitState.OPEN) {
if (Date.now() < this.nextAttempt) {
throw new Error("Circuit breaker is OPEN");
}
this.state = CircuitState.HALF_OPEN;
}
try {
const result = await operation();
this.onSuccess();
return result;
} catch (error) {
this.onFailure();
throw error;
}
}
private onSuccess() {
this.failures = 0;
this.state = CircuitState.CLOSED;
}
private onFailure() {
this.failures++;
this.lastFailureTime = Date.now();
if (this.failures >= this.options.failureThreshold) {
this.state = CircuitState.OPEN;
this.nextAttempt = Date.now() + this.options.recoveryTimeout;
}
}
getState(): CircuitState {
return this.state;
}
getMetrics() {
return {
state: this.state,
failures: this.failures,
lastFailureTime: this.lastFailureTime,
nextAttempt: this.nextAttempt,
};
}
}
// Create circuit breaker for each model
class CircuitBreakerClient {
private breakers = new Map<string, CircuitBreaker>();
constructor(private client: any) {}
private getBreaker(modelName: string): CircuitBreaker {
if (!this.breakers.has(modelName)) {
this.breakers.set(
modelName,
new CircuitBreaker({
failureThreshold: 5,
recoveryTimeout: 60000,
monitoringPeriod: 10000,
})
);
}
return this.breakers.get(modelName)!;
}
async getList<T>(modelName: string, params?: any): Promise<T> {
const breaker = this.getBreaker(modelName);
return breaker.execute(() => this.client.models[modelName].filter(params));
}
getHealthStatus() {
const status: Record<string, any> = {};
this.breakers.forEach((breaker, modelName) => {
status[modelName] = breaker.getMetrics();
});
return status;
}
}
const breakerClient = new CircuitBreakerClient(client);
// Monitor health status
setInterval(() => {
console.log("Circuit Breaker Status:", breakerClient.getHealthStatus());
}, 30000);
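When a breaker is open, execute throws before the request is even attempted, so callers can catch that error and degrade gracefully, for example by serving the last known good response. A sketch (staleUsersCache is a hypothetical fallback store):
let staleUsersCache: any = null; // hypothetical fallback store
async function getUsersWithFallback(params?: any) {
  try {
    const result = await breakerClient.getList("users", params);
    staleUsersCache = result; // remember the last successful response
    return result;
  } catch (error: any) {
    if (error.message === "Circuit breaker is OPEN" && staleUsersCache) {
      return staleUsersCache; // degrade gracefully instead of failing outright
    }
    throw error;
  }
}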
Custom Extensions
Middleware System
interface Middleware {
name: string;
before?: (context: RequestContext) => Promise<RequestContext>;
after?: (context: ResponseContext) => Promise<ResponseContext>;
error?: (context: ErrorContext) => Promise<void>;
}
interface RequestContext {
modelName: string;
method: string;
params: any;
metadata: Record<string, any>;
}
interface ResponseContext extends RequestContext {
response: any;
duration: number;
}
interface ErrorContext extends RequestContext {
error: any;
duration: number;
}
class MiddlewareClient {
private middlewares: Middleware[] = [];
constructor(private client: any) {}
use(middleware: Middleware) {
this.middlewares.push(middleware);
return this;
}
async execute<T>(
modelName: string,
method: string,
params?: any
): Promise<T> {
let context: RequestContext = {
modelName,
method,
params,
metadata: {},
};
const startTime = Date.now();
try {
// Execute before middleware
for (const middleware of this.middlewares) {
if (middleware.before) {
context = await middleware.before(context);
}
}
// Execute actual request
const response = await this.client.models[modelName][method](
context.params
);
const duration = Date.now() - startTime;
let responseContext: ResponseContext = {
...context,
response,
duration,
};
// Execute after middleware (in reverse order, without mutating the array)
for (const middleware of [...this.middlewares].reverse()) {
if (middleware.after) {
responseContext = await middleware.after(responseContext);
}
}
return responseContext.response;
} catch (error) {
const duration = Date.now() - startTime;
const errorContext: ErrorContext = {
...context,
error,
duration,
};
// Execute error middleware
for (const middleware of this.middlewares) {
if (middleware.error) {
await middleware.error(errorContext);
}
}
throw error;
}
}
}
// Built-in middleware
const loggingMiddleware: Middleware = {
name: "logging",
before: async (context) => {
console.log(`→ ${context.modelName}.${context.method}`, context.params);
return context;
},
after: async (context) => {
console.log(
`✅ ${context.modelName}.${context.method} completed in ${context.duration}ms`
);
return context;
},
error: async (context) => {
console.error(
`❌ ${context.modelName}.${context.method} failed after ${context.duration}ms:`,
context.error.message
);
},
};
const metricsMiddleware: Middleware = {
name: "metrics",
after: async (context) => {
// Send metrics to monitoring system
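// (sendMetric is a placeholder for your own metrics reporting function)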
sendMetric(`lovrabet.api.${context.modelName}.${context.method}`, {
duration: context.duration,
success: true,
});
return context;
},
error: async (context) => {
sendMetric(`lovrabet.api.${context.modelName}.${context.method}`, {
duration: context.duration,
success: false,
error: context.error.message,
});
},
};
// Use middleware
const middlewareClient = new MiddlewareClient(client)
.use(loggingMiddleware)
.use(metricsMiddleware);
// Now all requests will go through middleware processing
const users = await middlewareClient.execute("users", "getList", {
status: "active",
});
Server-Side Proxy Pattern (Dynamic datasetCode)
In server-side scenarios, you may need to build a proxy API that accepts a datasetCode from the frontend and forwards the call to the Lovrabet OpenAPI. This pattern has the following advantages:
- ✅ Security: accessKey is only configured on the server side and never exposed to the frontend
- ✅ Flexibility: supports any datasetCode without pre-configuration
- ✅ Zero maintenance: adding new datasets requires no code changes
Core Implementation
import { createClient } from "@lovrabet/sdk";
/**
* Dynamically create client proxy request
* @param datasetCode Dataset ID passed from frontend
* @param operation Operation name (getList/getOne/create/update)
* @param params Operation parameters
*/
async function proxyRequest(
datasetCode: string,
operation: string,
params?: any
) {
// Dynamically create client using datasetCode from frontend
const client = createClient({
appCode: process.env.LOVRABET_APP_CODE, // Server-side configuration
accessKey: process.env.LOVRABET_ACCESS_KEY, // Server-side configuration (secure)
env: "online",
models: {
_proxy: {
// Local variable name (any name)
tableName: "_proxy", // Only for identification (OpenAPI doesn't use this)
datasetCode: datasetCode, // Real dataset ID passed from frontend
},
},
});
// Execute operation
const result = await client.models._proxy[operation](params);
return result;
}
Node.js / Bun Server Example
// server.ts
import { createClient } from "@lovrabet/sdk";
// Route handler: POST /api/openapi-proxy/:operation
async function handleProxyRequest(request: Request) {
const url = new URL(request.url);
const operation = url.pathname.split("/").pop() ?? ""; // Extract operation name
// Parse request body
const { datasetCode, params } = await request.json();
// Parameter validation
if (!datasetCode) {
return Response.json({ error: "datasetCode is required" }, { status: 400 });
}
// Supported operations
const supportedOps = [
"getList",
"getOne",
"create",
"update",
"getDatasetList",
];
if (!supportedOps.includes(operation)) {
return Response.json(
{ error: `Operation "${operation}" not supported` },
{ status: 400 }
);
}
try {
// Dynamically create client
const client = createClient({
appCode: process.env.LOVRABET_APP_CODE!,
accessKey: process.env.LOVRABET_ACCESS_KEY!,
env: "online",
models: {
_proxy: {
tableName: "_proxy",
datasetCode: datasetCode, // Use datasetCode passed from frontend
},
},
});
// Execute operation
const result = await client.models._proxy[operation](params);
return Response.json({
success: true,
data: result,
operation,
datasetCode,
});
} catch (error: any) {
console.error("Proxy request failed:", error);
return Response.json(
{
success: false,
error: error.message,
},
{ status: 500 }
);
}
}
// Bun.serve example
Bun.serve({
port: 3000,
async fetch(request) {
const url = new URL(request.url);
// Match /api/openapi-proxy/* routes
if (url.pathname.startsWith("/api/openapi-proxy/")) {
return handleProxyRequest(request);
}
return new Response("Not Found", { status: 404 });
},
});
Frontend Call Example
// Frontend code: Call proxy API
async function fetchUsers(datasetCode: string, page = 1, pageSize = 20) {
const response = await fetch("/api/openapi-proxy/getList", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
datasetCode: datasetCode, // Dynamically pass any datasetCode
params: {
currentPage: page,
pageSize: pageSize,
},
}),
});
const result = await response.json();
if (!result.success) throw new Error(result.error);
return result.data;
}
// Use different datasetCodes
const users = await fetchUsers("dataset-001");
const orders = await fetchUsers("dataset-002");
const products = await fetchUsers("dataset-003");
Next.js API Route Example
// app/api/openapi-proxy/[operation]/route.ts
import { createClient } from "@lovrabet/sdk";
export async function POST(
request: Request,
{ params }: { params: { operation: string } }
) {
const { operation } = params;
const { datasetCode, params: requestParams } = await request.json();
// Parameter validation
if (!datasetCode) {
return Response.json({ error: "datasetCode is required" }, { status: 400 });
}
// Dynamically create client
const client = createClient({
appCode: process.env.LOVRABET_APP_CODE!,
accessKey: process.env.LOVRABET_ACCESS_KEY!,
models: {
_proxy: { tableName: "_proxy", datasetCode },
},
});
try {
const result = await client.models._proxy[operation](requestParams);
return Response.json({ success: true, data: result });
} catch (error: any) {
return Response.json({ error: error.message }, { status: 500 });
}
}
Express Example
// express-server.ts
import express from "express";
import { createClient } from "@lovrabet/sdk";
const app = express();
app.use(express.json());
// Proxy route
app.post("/api/openapi-proxy/:operation", async (req, res) => {
const { operation } = req.params;
const { datasetCode, params } = req.body;
if (!datasetCode) {
return res.status(400).json({ error: "datasetCode is required" });
}
try {
const client = createClient({
appCode: process.env.LOVRABET_APP_CODE!,
accessKey: process.env.LOVRABET_ACCESS_KEY!,
models: {
_proxy: { tableName: "_proxy", datasetCode },
},
});
const result = await client.models._proxy[operation](params);
res.json({ success: true, data: result });
} catch (error: any) {
res.status(500).json({ error: error.message });
}
});
app.listen(3000, () => {
console.log("Proxy server running on port 3000");
});
Key Points
1. _proxy is just a local identifier
models: {
_proxy: { // Can be renamed to anything (foo / bar / temp)
tableName: "_proxy", // OpenAPI does not use this value
datasetCode: "xxx", // This is the value actually sent to the server
},
}
In requests sent to Lovrabet, only datasetCode will be used:
POST /openapi/data/get-list
{
"appCode": "app-c4055413",
"datasetCode": "xxx", // â Use this value
"paramMap": { ... }
}
2. Why don't we need to replace _proxy?
_proxy is just an "alias" used to access the model instance; the SDK internally extracts the datasetCode value automatically. As an analogy:
// Similar to naming variables
const user = { id: 123 };
const person = { id: 123 };
// Different variable names, but same data sent
fetch("/api", { body: JSON.stringify(user) }); // Send { id: 123 }
fetch("/api", { body: JSON.stringify(person) }); // Send { id: 123 }
3. Support any datasetCode
The biggest advantage of this pattern is zero configuration:
// No pre-registration needed, supports any datasetCode
proxyRequest("dataset-001", "getList", { currentPage: 1 });
proxyRequest("dataset-002", "getList", { currentPage: 1 });
proxyRequest("brand-new-dataset", "getList", { currentPage: 1 });
Security Considerations
- Never expose accessKey in the frontend
// ❌ Wrong: the frontend uses the accessKey directly
const client = createClient({
accessKey: "your-access-key", // Exposed in client code!
});
// ✅ Correct: go through the server-side proxy
fetch("/api/openapi-proxy/getList", {
body: JSON.stringify({ datasetCode: "xxx" }),
});
- Add authentication
// Verify user identity
const session = await getSession(request);
if (!session) {
return Response.json({ error: "Unauthorized" }, { status: 401 });
}
- datasetCode whitelist (optional)
// If you need to restrict which datasets can be accessed
const allowedDatasets = ["dataset-001", "dataset-002"];
if (!allowedDatasets.includes(datasetCode)) {
return Response.json({ error: "Dataset not allowed" }, { status: 403 });
}
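Putting the three safeguards together in one proxy handler might look like this (a sketch based on the Bun handler above; getSession and the allow-list stand in for your own authentication and policy):
async function handleProxyRequestSecurely(request: Request) {
  // 1. Authentication (getSession is a placeholder for your auth layer)
  const session = await getSession(request);
  if (!session) {
    return Response.json({ error: "Unauthorized" }, { status: 401 });
  }
  const { datasetCode, params } = await request.json();
  // 2. Dataset allow-list (optional; adjust to your own policy)
  const allowedDatasets = ["dataset-001", "dataset-002"];
  if (!allowedDatasets.includes(datasetCode)) {
    return Response.json({ error: "Dataset not allowed" }, { status: 403 });
  }
  // 3. accessKey stays on the server; only the proxy talks to Lovrabet
  const client = createClient({
    appCode: process.env.LOVRABET_APP_CODE!,
    accessKey: process.env.LOVRABET_ACCESS_KEY!,
    models: { _proxy: { tableName: "_proxy", datasetCode } },
  });
  const result = await client.models._proxy.getList(params);
  return Response.json({ success: true, data: result });
}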
Advantages Summary
| Feature | Traditional Method | Dynamic Proxy Pattern |
|---|---|---|
| Security | Need to configure accessKey | ✅ accessKey only on server |
| Flexibility | Need to pre-register models | ✅ Supports any datasetCode |
| Maintenance | Need code changes for datasets | ✅ Zero config, no changes |
| Use Cases | Fixed dataset applications | ✅ Multi-tenant, SaaS |
This pattern is particularly suitable for:
- Multi-tenant applications: Each tenant has independent datasets
- Backend management systems: Need to access multiple dynamic datasets
- SaaS platforms: Customers can customize datasets
Next Steps
After learning about advanced features, you can continue with:
- Practical Examples - React/Vue integration examples
- Troubleshooting - Common issues and debugging tips
Common Questions
Q: How to monitor SDK performance?
// Performance monitoring example
class PerformanceMonitor {
private metrics = new Map<
string,
Array<{ duration: number; timestamp: number }>
>();
recordRequest(key: string, duration: number) {
if (!this.metrics.has(key)) {
this.metrics.set(key, []);
}
const records = this.metrics.get(key)!;
records.push({ duration, timestamp: Date.now() });
// Only keep the latest 100 records
if (records.length > 100) {
records.shift();
}
}
getStats(key: string) {
const records = this.metrics.get(key) || [];
if (records.length === 0) return null;
const durations = records.map((r) => r.duration);
return {
count: records.length,
avg: durations.reduce((a, b) => a + b, 0) / durations.length,
min: Math.min(...durations),
max: Math.max(...durations),
p95: this.percentile(durations, 0.95),
};
}
private percentile(arr: number[], p: number) {
const sorted = arr.slice().sort((a, b) => a - b);
const index = Math.ceil(sorted.length * p) - 1;
return sorted[index];
}
}
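Usage might look like this (a sketch that times the SDK call manually; you could also record durations from the metricsMiddleware shown earlier):
const monitor = new PerformanceMonitor();
async function monitoredGetList(modelName: string, params?: any) {
  const start = Date.now();
  try {
    return await client.models[modelName].filter(params);
  } finally {
    monitor.recordRequest(`${modelName}.getList`, Date.now() - start);
  }
}
// Later, inspect the collected statistics
console.log(monitor.getStats("users.getList")); // { count, avg, min, max, p95 }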
Q: How to implement a request priority queue?
enum RequestPriority {
HIGH = 1,
MEDIUM = 2,
LOW = 3,
}
class PriorityQueue {
private queue: Array<{
request: () => Promise<any>;
priority: RequestPriority;
resolve: any;
reject: any;
}> = [];
private maxConcurrent = 3;
private currentlyProcessing = 0;
async enqueue<T>(
request: () => Promise<T>,
priority: RequestPriority = RequestPriority.MEDIUM
): Promise<T> {
return new Promise((resolve, reject) => {
this.queue.push({ request, priority, resolve, reject });
this.queue.sort((a, b) => a.priority - b.priority);
this.process();
});
}
private async process() {
if (this.currentlyProcessing >= this.maxConcurrent) return;
const item = this.queue.shift();
if (!item) return;
this.currentlyProcessing++;
try {
const result = await item.request();
item.resolve(result);
} catch (error) {
item.reject(error);
} finally {
this.currentlyProcessing--;
this.process(); // Process next
}
}
}
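Usage (a sketch; the model names are illustrative):
const queue = new PriorityQueue();
// Critical data is scheduled ahead of background refreshes.
const currentUser = queue.enqueue(
  () => client.models.users.filter({ id: "me" }),
  RequestPriority.HIGH
);
const backgroundReports = queue.enqueue(
  () => client.models.reports.filter({ range: "30d" }),
  RequestPriority.LOW
);
const [user, reports] = await Promise.all([currentUser, backgroundReports]);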