Get Usage

GET https://api-mainnet.onzks.com/v1/developer/usage

curl --request GET \
  --url https://api-mainnet.onzks.com/v1/developer/usage \
  --header 'Authorization: Bearer <token>'

{
  "success": true,
  "usage": {
    "totalRequests": 123,
    "successfulRequests": 123,
    "failedRequests": 123,
    "successRate": 123,
    "averageLatency": 123,
    "peakLatency": 123,
    "requestsPerMinute": 123,
    "requestsPerHour": 123,
    "requestsPerDay": 123,
    "peakRequestsPerMinute": 123,
    "peakRequestsPerHour": 123,
    "dataTransferred": 123,
    "averageDataPerRequest": 123
  },
  "timeSeries": [
    {
      "timestamp": "<string>",
      "requests": 123,
      "successfulRequests": 123,
      "failedRequests": 123,
      "averageLatency": 123,
      "peakLatency": 123,
      "dataTransferred": 123
    }
  ],
  "breakdown": {
    "byEndpoint": [
      {
        "endpoint": "<string>",
        "requests": 123,
        "successRate": 123,
        "averageLatency": 123,
        "dataTransferred": 123
      }
    ],
    "byMethod": [
      {}
    ],
    "byStatusCode": [
      {}
    ],
    "byKey": [
      {}
    ]
  },
  "errors": {
    "totalErrors": 123,
    "errorRate": 123,
    "byType": [
      {
        "type": "<string>",
        "count": 123,
        "percentage": 123,
        "description": "<string>"
      }
    ],
    "byEndpoint": [
      {}
    ],
    "byStatusCode": [
      {}
    ]
  },
  "rateLimits": {
    "current": {
      "requestsPerMinute": 123,
      "requestsPerHour": 123,
      "requestsPerDay": 123
    },
    "usage": {
      "requestsPerMinute": 123,
      "requestsPerHour": 123,
      "requestsPerDay": 123
    },
    "utilization": {
      "requestsPerMinute": 123,
      "requestsPerHour": 123,
      "requestsPerDay": 123
    }
  },
  "alerts": [
    {
      "type": "<string>",
      "message": "<string>",
      "priority": "<string>",
      "recommendation": "<string>"
    }
  ],
  "pagination": {
    "total": 123,
    "limit": 123,
    "offset": 123,
    "hasMore": true
  },
  "timestamp": "<string>"
}

Overview

Retrieve comprehensive usage statistics and analytics for your API keys. This endpoint provides detailed insights into API usage patterns, rate limit consumption, and performance metrics to help optimize your API key management.
Use this endpoint to monitor API usage, identify optimization opportunities, and ensure you’re staying within rate limits. The data helps with capacity planning and cost optimization.

Parameters

keyId
string
Specific API key ID to get usage for (optional)
timeframe
string
Time period for usage data
  • 1h - Last hour
  • 24h - Last 24 hours (default)
  • 7d - Last 7 days
  • 30d - Last 30 days
  • 90d - Last 90 days
  • 1y - Last year
  • all - All time
granularity
string
Data granularity for time series
  • minute - Per-minute data
  • hour - Per-hour data (default)
  • day - Per-day data
  • week - Per-week data
  • month - Per-month data
includeBreakdown
boolean
Include detailed breakdowns by endpoint, method, status code, and key (default: true)
includeErrors
boolean
Include error statistics (default: true)
includeLatency
boolean
Include latency statistics (default: true)
sortBy
string
Sort results by field
  • requests - By request count (default)
  • errors - By error count
  • latency - By average latency
  • timestamp - By timestamp
sortOrder
string
Sort order
  • desc - Descending (default)
  • asc - Ascending
limit
number
Number of results to return (default: 100, max: 1000)
offset
number
Number of results to skip for pagination (default: 0)

Response

success
boolean
Indicates if the request was successful
usage
object
Usage statistics
timeSeries
array
Time series data points
breakdown
object
Usage breakdown by endpoint, method, status code, and API key
errors
object
Error statistics
rateLimits
object
Rate limit information
alerts
array
Usage alerts and recommendations
pagination
object
Pagination information
timestamp
string
ISO 8601 timestamp of the response

Examples

curl "https://api.onzks.com/v1/developer/usage" \
  -H "Authorization: Bearer YOUR_API_KEY"

Response Example

{
  "success": true,
  "usage": {
    "totalRequests": 15420,
    "successfulRequests": 15080,
    "failedRequests": 340,
    "successRate": 97.8,
    "averageLatency": 245.5,
    "peakLatency": 1250.0,
    "requestsPerMinute": 10.7,
    "requestsPerHour": 642.5,
    "requestsPerDay": 15420,
    "peakRequestsPerMinute": 25,
    "peakRequestsPerHour": 1200,
    "dataTransferred": 125000000,
    "averageDataPerRequest": 8104
  },
  "timeSeries": [
    {
      "timestamp": "2024-01-20T14:00:00Z",
      "requests": 15,
      "successfulRequests": 14,
      "failedRequests": 1,
      "averageLatency": 230.5,
      "peakLatency": 450.0,
      "dataTransferred": 125000
    },
    {
      "timestamp": "2024-01-20T15:00:00Z",
      "requests": 18,
      "successfulRequests": 18,
      "failedRequests": 0,
      "averageLatency": 245.0,
      "peakLatency": 380.0,
      "dataTransferred": 150000
    }
  ],
  "breakdown": {
    "byEndpoint": [
      {
        "endpoint": "/v1/scores/:identity",
        "requests": 8500,
        "successRate": 98.5,
        "averageLatency": 180.0,
        "dataTransferred": 68000000
      },
      {
        "endpoint": "/v1/identity/:identity",
        "requests": 4200,
        "successRate": 97.2,
        "averageLatency": 320.0,
        "dataTransferred": 42000000
      },
      {
        "endpoint": "/v1/achievements/:identity",
        "requests": 2720,
        "successRate": 96.8,
        "averageLatency": 450.0,
        "dataTransferred": 15000000
      }
    ],
    "byMethod": [
      {
        "method": "GET",
        "requests": 14200,
        "percentage": 92.1
      },
      {
        "method": "POST",
        "requests": 1220,
        "percentage": 7.9
      }
    ],
    "byStatusCode": [
      {
        "statusCode": 200,
        "requests": 15080,
        "percentage": 97.8
      },
      {
        "statusCode": 429,
        "requests": 200,
        "percentage": 1.3
      },
      {
        "statusCode": 500,
        "requests": 140,
        "percentage": 0.9
      }
    ],
    "byKey": [
      {
        "keyId": "key_1234567890abcdef",
        "requests": 12000,
        "percentage": 77.8
      },
      {
        "keyId": "key_abcdef1234567890",
        "requests": 3420,
        "percentage": 22.2
      }
    ]
  },
  "errors": {
    "totalErrors": 340,
    "errorRate": 2.2,
    "byType": [
      {
        "type": "rate_limit_exceeded",
        "count": 200,
        "percentage": 58.8,
        "description": "Rate limit exceeded"
      },
      {
        "type": "server_error",
        "count": 140,
        "percentage": 41.2,
        "description": "Internal server error"
      }
    ],
    "byEndpoint": [
      {
        "endpoint": "/v1/scores/:identity",
        "errors": 120,
        "percentage": 35.3
      },
      {
        "endpoint": "/v1/identity/:identity",
        "errors": 220,
        "percentage": 64.7
      }
    ],
    "byStatusCode": [
      {
        "statusCode": 429,
        "count": 200,
        "percentage": 58.8
      },
      {
        "statusCode": 500,
        "count": 140,
        "percentage": 41.2
      }
    ]
  },
  "rateLimits": {
    "current": {
      "requestsPerMinute": 100,
      "requestsPerHour": 1000,
      "requestsPerDay": 10000
    },
    "usage": {
      "requestsPerMinute": 10.7,
      "requestsPerHour": 642.5,
      "requestsPerDay": 15420
    },
    "utilization": {
      "requestsPerMinute": 10.7,
      "requestsPerHour": 64.25,
      "requestsPerDay": 154.2
    }
  },
  "alerts": [
    {
      "type": "rate_limit_approaching",
      "message": "Daily rate limit is 154.2% utilized",
      "priority": "high",
      "recommendation": "Consider upgrading your plan or optimizing requests"
    },
    {
      "type": "error_rate_high",
      "message": "Error rate is 2.2%, above recommended 1%",
      "priority": "medium",
      "recommendation": "Review error patterns and implement retry logic"
    }
  ],
  "pagination": {
    "total": 24,
    "limit": 100,
    "offset": 0,
    "hasMore": false
  },
  "timestamp": "2024-01-20T15:45:00Z"
}

Use Cases

1. Usage Monitoring Dashboard

Create a comprehensive usage dashboard:
function createUsageDashboard(usageData) {
  const dashboard = {
    overview: {
      totalRequests: usageData.usage.totalRequests,
      successRate: usageData.usage.successRate,
      averageLatency: usageData.usage.averageLatency,
      peakRequests: usageData.usage.peakRequestsPerMinute
    },
    trends: analyzeTrends(usageData.timeSeries),
    alerts: usageData.alerts,
    recommendations: generateRecommendations(usageData)
  };
  
  return dashboard;
}

function analyzeTrends(timeSeries) {
  const trends = {
    requests: calculateTrend(timeSeries.map(t => t.requests)),
    latency: calculateTrend(timeSeries.map(t => t.averageLatency)),
    errors: calculateTrend(timeSeries.map(t => t.failedRequests))
  };
  
  return trends;
}

function calculateTrend(values) {
  if (values.length < 2) return 'stable';
  
  const firstHalf = values.slice(0, Math.floor(values.length / 2));
  const secondHalf = values.slice(Math.floor(values.length / 2));
  
  const firstAvg = firstHalf.reduce((sum, val) => sum + val, 0) / firstHalf.length;
  const secondAvg = secondHalf.reduce((sum, val) => sum + val, 0) / secondHalf.length;
  
  const change = (secondAvg - firstAvg) / firstAvg;
  
  if (change > 0.1) return 'increasing';
  if (change < -0.1) return 'decreasing';
  return 'stable';
}
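
The dashboard above calls a generateRecommendations helper that is not shown; a minimal sketch (assuming only the alerts payload from the Response Example) simply collects the recommendations the API already returns:
function generateRecommendations(usageData) {
  // Gather the recommendation strings attached to each alert
  const fromAlerts = (usageData.alerts || []).map(alert => alert.recommendation);
  
  // De-duplicate while preserving order and drop empty values
  return [...new Set(fromAlerts)].filter(Boolean);
}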

2. Performance Optimization

Optimize API usage based on data:
function optimizeUsage(usageData) {
  const optimization = {
    bottlenecks: identifyBottlenecks(usageData),
    recommendations: generateOptimizationRecommendations(usageData),
    costSavings: calculateCostSavings(usageData)
  };
  
  return optimization;
}

function identifyBottlenecks(usageData) {
  const bottlenecks = [];
  
  // High latency endpoints
  usageData.breakdown.byEndpoint.forEach(endpoint => {
    if (endpoint.averageLatency > 500) {
      bottlenecks.push({
        type: 'high_latency',
        endpoint: endpoint.endpoint,
        latency: endpoint.averageLatency,
        recommendation: 'Consider caching or optimizing this endpoint'
      });
    }
  });
  
  // High error rate endpoints
  usageData.breakdown.byEndpoint.forEach(endpoint => {
    if (endpoint.successRate < 95) {
      bottlenecks.push({
        type: 'high_error_rate',
        endpoint: endpoint.endpoint,
        successRate: endpoint.successRate,
        recommendation: 'Investigate and fix errors in this endpoint'
      });
    }
  });
  
  // Rate limit issues
  if (usageData.rateLimits.utilization.requestsPerDay > 80) {
    bottlenecks.push({
      type: 'rate_limit_approaching',
      utilization: usageData.rateLimits.utilization.requestsPerDay,
      recommendation: 'Consider upgrading your plan or optimizing requests'
    });
  }
  
  return bottlenecks;
}
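
generateOptimizationRecommendations and calculateCostSavings are left undefined above; as one sketch, the former can simply surface each detected bottleneck's suggested fix:
function generateOptimizationRecommendations(usageData) {
  // Reuse identifyBottlenecks and expose each bottleneck's recommendation
  return identifyBottlenecks(usageData).map(bottleneck => ({
    type: bottleneck.type,
    recommendation: bottleneck.recommendation
  }));
}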

3. Cost Analysis

Analyze API usage costs:
function analyzeCosts(usageData) {
  const costs = {
    current: calculateCurrentCosts(usageData),
    projected: calculateProjectedCosts(usageData),
    optimization: calculateOptimizationSavings(usageData)
  };
  
  return costs;
}

function calculateCurrentCosts(usageData) {
  const requests = usageData.usage.totalRequests;
  const dataTransferred = usageData.usage.dataTransferred;
  
  // Example pricing (adjust based on actual pricing)
  const requestCost = requests * 0.001; // $0.001 per request
  const dataCost = dataTransferred * 0.000001; // $0.000001 per byte
  
  return {
    requests: requestCost,
    data: dataCost,
    total: requestCost + dataCost
  };
}

function calculateProjectedCosts(usageData) {
  const current = calculateCurrentCosts(usageData);
  const growthRate = calculateGrowthRate(usageData.timeSeries);
  
  return {
    nextMonth: current.total * (1 + growthRate),
    nextQuarter: current.total * Math.pow(1 + growthRate, 3),
    nextYear: current.total * Math.pow(1 + growthRate, 12)
  };
}
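
calculateGrowthRate is not shown above; a minimal sketch compares the first and last time-series buckets and treats the result as a per-period growth rate (a simplifying assumption):
function calculateGrowthRate(timeSeries) {
  if (!timeSeries || timeSeries.length < 2) return 0;
  
  const first = timeSeries[0].requests;
  const last = timeSeries[timeSeries.length - 1].requests;
  
  // Avoid dividing by zero when the first bucket saw no traffic
  if (first === 0) return last > 0 ? 1 : 0;
  
  return (last - first) / first;
}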

4. Alert Management

Manage usage alerts:
function manageAlerts(usageData) {
  const alerts = [];
  
  // Rate limit alerts (utilization values are percentages of the plan limit)
  if (usageData.rateLimits.utilization.requestsPerDay >= 100) {
    alerts.push({
      type: 'critical',
      message: 'Daily rate limit exceeded',
      action: 'upgrade_plan'
    });
  } else if (usageData.rateLimits.utilization.requestsPerDay > 80) {
    alerts.push({
      type: 'warning',
      message: 'Daily rate limit approaching',
      action: 'monitor_usage'
    });
  }
  
  // Error rate alerts
  if (usageData.usage.successRate < 95) {
    alerts.push({
      type: 'warning',
      message: 'High error rate detected',
      action: 'investigate_errors'
    });
  }
  
  // Latency alerts
  if (usageData.usage.averageLatency > 1000) {
    alerts.push({
      type: 'warning',
      message: 'High latency detected',
      action: 'optimize_requests'
    });
  }
  
  return alerts;
}

5. Capacity Planning

Plan for future capacity needs:
function planCapacity(usageData) {
  const plan = {
    current: {
      requests: usageData.usage.totalRequests,
      latency: usageData.usage.averageLatency,
      errors: usageData.usage.failedRequests
    },
    projected: {
      requests: projectRequests(usageData.timeSeries),
      latency: projectLatency(usageData.timeSeries),
      errors: projectErrors(usageData.timeSeries)
    },
    recommendations: generateCapacityRecommendations(usageData)
  };
  
  return plan;
}

function projectRequests(timeSeries) {
  if (timeSeries.length < 2) return timeSeries[0]?.requests || 0;
  
  const requests = timeSeries.map(t => t.requests);
  const trend = calculateTrend(requests);
  
  if (trend === 'increasing') {
    const growthRate = (requests[requests.length - 1] - requests[0]) / requests[0];
    return Math.ceil(requests[requests.length - 1] * (1 + growthRate));
  }
  
  return requests[requests.length - 1];
}
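
projectLatency and projectErrors can follow the same first-versus-last pattern; for example, a sketch of projectLatency that reuses calculateTrend:
function projectLatency(timeSeries) {
  if (timeSeries.length < 2) return timeSeries[0]?.averageLatency || 0;
  
  const latencies = timeSeries.map(t => t.averageLatency);
  const last = latencies[latencies.length - 1];
  
  // Project forward only when latency is trending upward
  if (calculateTrend(latencies) === 'increasing' && latencies[0] > 0) {
    const growthRate = (last - latencies[0]) / latencies[0];
    return last * (1 + growthRate);
  }
  
  return last;
}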

Best Practices

1. Regular Monitoring

Monitor usage regularly:
async function monitorUsage() {
  const usage = await getUsage({ timeframe: '24h' });
  
  const monitoring = {
    alerts: usage.alerts,
    recommendations: generateRecommendations(usage),
    trends: analyzeTrends(usage.timeSeries)
  };
  
  return monitoring;
}
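
getUsage above is assumed to be a thin wrapper around this endpoint; a minimal sketch using fetch in Node.js, with the API key read from an environment variable (the variable name is an assumption):
async function getUsage(params = {}) {
  const query = new URLSearchParams(params).toString();
  const url = `https://api-mainnet.onzks.com/v1/developer/usage${query ? `?${query}` : ''}`;
  
  const response = await fetch(url, {
    headers: { Authorization: `Bearer ${process.env.ONZKS_API_KEY}` }
  });
  
  if (!response.ok) {
    throw new Error(`Usage request failed with status ${response.status}`);
  }
  
  return response.json();
}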

2. Automated Alerts

Set up automated alerts:
function setupUsageAlerts(usageData) {
  const alerts = [];
  
  // Rate limit alerts
  if (usageData.rateLimits.utilization.requestsPerDay > 80) {
    alerts.push({
      type: 'rate_limit',
      message: 'Rate limit utilization is high',
      threshold: 80,
      current: usageData.rateLimits.utilization.requestsPerDay
    });
  }
  
  // Error rate alerts
  if (usageData.usage.successRate < 95) {
    alerts.push({
      type: 'error_rate',
      message: 'Success rate is below 95%',
      threshold: 95,
      current: usageData.usage.successRate
    });
  }
  
  // Latency alerts
  if (usageData.usage.averageLatency > 500) {
    alerts.push({
      type: 'latency',
      message: 'Average latency is high',
      threshold: 500,
      current: usageData.usage.averageLatency
    });
  }
  
  return alerts;
}

3. Performance Optimization

Optimize based on usage data:
function optimizePerformance(usageData) {
  const optimization = {
    caching: identifyCachingOpportunities(usageData),
    batching: identifyBatchingOpportunities(usageData),
    rateLimiting: optimizeRateLimiting(usageData)
  };
  
  return optimization;
}
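
The three helpers above are placeholders; as one example, identifyCachingOpportunities might flag endpoints that account for a large share of traffic (the 25% threshold is an illustrative assumption):
function identifyCachingOpportunities(usageData) {
  const totalRequests = usageData.usage.totalRequests || 1;
  
  // Endpoints that dominate traffic benefit most from response caching
  return usageData.breakdown.byEndpoint
    .filter(endpoint => endpoint.requests / totalRequests > 0.25)
    .map(endpoint => ({
      endpoint: endpoint.endpoint,
      requests: endpoint.requests,
      recommendation: 'Cache responses for this endpoint to reduce API calls'
    }));
}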

4. Cost Optimization

Optimize costs:
function optimizeCosts(usageData) {
  const optimization = {
    requestOptimization: optimizeRequests(usageData),
    dataOptimization: optimizeDataTransfer(usageData),
    planOptimization: optimizePlan(usageData)
  };
  
  return optimization;
}

Troubleshooting

“No usage data found”

Cause: No usage data for the specified timeframe or key.
Solution:
  • Check if the API key has been used
  • Try a longer timeframe
  • Verify the key ID is correct

“Invalid timeframe”

Cause: Unsupported timeframe value.
Solution:
  • Use supported timeframes: 1h, 24h, 7d, 30d, 90d, 1y, all
  • Check for typos

“Invalid granularity”

Cause: Unsupported granularity value.
Solution:
  • Use supported granularities: minute, hour, day, week, month
  • Check for typos

Rate Limits

Usage requests are subject to rate limits:
  • Free tier: 60 requests per minute
  • Starter tier: 300 requests per minute
  • Professional tier: 1,000 requests per minute
  • Enterprise tier: Custom limits
Implement caching to reduce API calls.
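
A simple in-memory cache around the usage call might look like the sketch below (the one-minute TTL and the getUsage helper from the Best Practices section are assumptions):
const usageCache = new Map();
const USAGE_CACHE_TTL_MS = 60 * 1000; // one minute, aligned with per-minute limits

async function getCachedUsage(params = {}) {
  const key = JSON.stringify(params);
  const cached = usageCache.get(key);
  
  // Serve recent results from memory instead of re-hitting the endpoint
  if (cached && Date.now() - cached.fetchedAt < USAGE_CACHE_TTL_MS) {
    return cached.data;
  }
  
  const data = await getUsage(params);
  usageCache.set(key, { data, fetchedAt: Date.now() });
  return data;
}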