diff --git a/.changeset/usage_reporting.md b/.changeset/usage_reporting.md new file mode 100644 index 00000000..9b270772 --- /dev/null +++ b/.changeset/usage_reporting.md @@ -0,0 +1,15 @@ +--- +router: patch +--- + +# Usage Reporting to Hive Console + +Hive Router now supports sending usage reports to the Hive Console. This feature allows you to monitor and analyze the performance and usage of your GraphQL services directly from the Hive Console. +To enable usage reporting, you need to configure the `usage_reporting` section in your Hive Router configuration file. + +[Learn more about usage reporting in the documentation.](https://the-guild.dev/graphql/hive/docs/router/configuration/usage_reporting) +```yaml +usage_reporting: + enabled: true + access_token: "YOUR_HIVE_CONSOLE_ACCESS_TOKEN" +``` \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index da277372..7a7815d3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2064,6 +2064,7 @@ dependencies = [ "http", "humantime-serde", "jsonwebtoken", + "regex-automata", "retry-policies", "schemars 1.0.5", "serde", diff --git a/Cargo.toml b/Cargo.toml index 3b3d217e..1a9d1c08 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,3 +59,4 @@ retry-policies = "0.4.0" reqwest-retry = "0.7.0" reqwest-middleware = "0.4.2" vrl = { version = "0.28.0", features = ["compiler", "parser", "value", "diagnostic", "stdlib", "core"] } +regex-automata = "0.4.10" diff --git a/bin/router/Cargo.toml b/bin/router/Cargo.toml index ee1c5685..018f5d82 100644 --- a/bin/router/Cargo.toml +++ b/bin/router/Cargo.toml @@ -45,6 +45,7 @@ reqwest-retry = { workspace = true } reqwest-middleware = { workspace = true } vrl = { workspace = true } serde_json = { workspace = true } +regex-automata = { workspace = true } mimalloc = { version = "0.1.48", features = ["v3"] } moka = { version = "0.12.10", features = ["future"] } @@ -52,7 +53,6 @@ hive-console-sdk = "0.2.0" ulid = "1.2.1" tokio-util = "0.7.16" cookie = "0.18.1" -regex-automata = "0.4.10" 
arc-swap = "1.7.1" lasso2 = "0.8.2" ahash = "0.8.12" diff --git a/bin/router/src/lib.rs b/bin/router/src/lib.rs index f3f12c9a..d565c939 100644 --- a/bin/router/src/lib.rs +++ b/bin/router/src/lib.rs @@ -20,7 +20,7 @@ use crate::{ }, jwt::JwtAuthRuntime, logger::configure_logging, - pipeline::graphql_request_handler, + pipeline::{graphql_request_handler, usage_reporting::init_hive_user_agent}, }; pub use crate::{schema_state::SchemaState, shared_state::RouterSharedState}; @@ -112,11 +112,23 @@ pub async fn configure_app_from_config( false => None, }; + let hive_usage_agent = match router_config.usage_reporting.enabled { + true => Some(init_hive_user_agent( + bg_tasks_manager, + &router_config.usage_reporting, + )?), + false => None, + }; + let router_config_arc = Arc::new(router_config); let schema_state = SchemaState::new_from_config(bg_tasks_manager, router_config_arc.clone()).await?; let schema_state_arc = Arc::new(schema_state); - let shared_state = Arc::new(RouterSharedState::new(router_config_arc, jwt_runtime)?); + let shared_state = Arc::new(RouterSharedState::new( + router_config_arc, + jwt_runtime, + hive_usage_agent, + )?); Ok((shared_state, schema_state_arc)) } diff --git a/bin/router/src/pipeline/mod.rs b/bin/router/src/pipeline/mod.rs index 6a85afc5..82e8c000 100644 --- a/bin/router/src/pipeline/mod.rs +++ b/bin/router/src/pipeline/mod.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{sync::Arc, time::Instant}; use hive_router_plan_executor::execution::{ client_request_details::{ClientRequestDetails, JwtRequestDetails, OperationDetails}, @@ -48,6 +48,7 @@ pub mod normalize; pub mod parser; pub mod progressive_override; pub mod query_plan; +pub mod usage_reporting; pub mod validation; static GRAPHIQL_HTML: &str = include_str!("../../static/graphiql.html"); @@ -116,6 +117,7 @@ pub async fn execute_pipeline( shared_state: &Arc, schema_state: &Arc, ) -> Result { + let start = Instant::now(); perform_csrf_prevention(req, &shared_state.router_config.csrf)?; 
let mut execution_request = get_execution_request(req, body_bytes).await?; @@ -231,5 +233,19 @@ pub async fn execute_pipeline( }; let execution_result = execute_plan(req, supergraph, shared_state, &planned_request).await?; + if shared_state.router_config.usage_reporting.enabled { + if let Some(hive_usage_agent) = &shared_state.hive_usage_agent { + usage_reporting::collect_usage_report( + supergraph.supergraph_schema.clone(), + start.elapsed(), + req, + &client_request_details, + hive_usage_agent, + &shared_state.router_config.usage_reporting, + &execution_result, + ); + } + } + Ok(execution_result) } diff --git a/bin/router/src/pipeline/usage_reporting.rs b/bin/router/src/pipeline/usage_reporting.rs new file mode 100644 index 00000000..bf577507 --- /dev/null +++ b/bin/router/src/pipeline/usage_reporting.rs @@ -0,0 +1,117 @@ +use std::{ + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + +use async_trait::async_trait; +use graphql_parser::schema::Document; +use hive_console_sdk::agent::{AgentError, ExecutionReport, UsageAgent, UsageAgentExt}; +use hive_router_config::usage_reporting::UsageReportingConfig; +use hive_router_plan_executor::execution::{ + client_request_details::ClientRequestDetails, plan::PlanExecutionOutput, +}; +use ntex::web::HttpRequest; +use rand::Rng; +use tokio_util::sync::CancellationToken; + +use crate::{ + background_tasks::{BackgroundTask, BackgroundTasksManager}, + consts::ROUTER_VERSION, +}; + +#[derive(Debug, thiserror::Error)] +pub enum UsageReportingError { + #[error("Usage Reporting - Access token is missing. 
Please provide it via 'HIVE_ACCESS_TOKEN' environment variable or under 'usage_reporting.access_token' in the configuration.")] + MissingAccessToken, + #[error("Usage Reporting - Failed to initialize usage agent: {0}")] + AgentCreationError(#[from] AgentError), +} + +pub fn init_hive_user_agent( + bg_tasks_manager: &mut BackgroundTasksManager, + usage_config: &UsageReportingConfig, +) -> Result, UsageReportingError> { + let user_agent = format!("hive-router/{}", ROUTER_VERSION); + let access_token = usage_config + .access_token + .as_deref() + .ok_or(UsageReportingError::MissingAccessToken)?; + + let hive_user_agent = UsageAgent::try_new( + access_token, + usage_config.endpoint.clone(), + usage_config.target_id.clone(), + usage_config.buffer_size, + usage_config.connect_timeout, + usage_config.request_timeout, + usage_config.accept_invalid_certs, + usage_config.flush_interval, + user_agent, + )?; + bg_tasks_manager.register_task(hive_user_agent.clone()); + Ok(hive_user_agent) +} + +#[inline] +pub fn collect_usage_report( + schema: Arc>, + duration: Duration, + req: &HttpRequest, + client_request_details: &ClientRequestDetails, + hive_usage_agent: &Arc, + usage_config: &UsageReportingConfig, + execution_result: &PlanExecutionOutput, +) { + let sample_rate = usage_config.sample_rate.as_f64(); + if sample_rate < 1.0 && !rand::rng().random_bool(sample_rate) { + return; + } + if client_request_details + .operation + .name + .is_some_and(|op_name| usage_config.exclude.iter().any(|s| s == op_name)) + { + return; + } + let client_name = get_header_value(req, &usage_config.client_name_header); + let client_version = get_header_value(req, &usage_config.client_version_header); + let timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_millis() as u64; + let execution_report = ExecutionReport { + schema, + client_name: client_name.map(|s| s.to_owned()), + client_version: client_version.map(|s| s.to_owned()), + timestamp, + duration, + ok: 
execution_result.error_count == 0, + errors: execution_result.error_count, + operation_body: client_request_details.operation.query.to_owned(), + operation_name: client_request_details + .operation + .name + .map(|op_name| op_name.to_owned()), + persisted_document_hash: None, + }; + + if let Err(err) = hive_usage_agent.add_report(execution_report) { + tracing::error!("Failed to send usage report: {}", err); + } +} + +#[inline] +fn get_header_value<'req>(req: &'req HttpRequest, header_name: &str) -> Option<&'req str> { + req.headers().get(header_name).and_then(|v| v.to_str().ok()) +} + +#[async_trait] +impl BackgroundTask for UsageAgent { + fn id(&self) -> &str { + "hive_console_usage_report_task" + } + + async fn run(&self, token: CancellationToken) { + self.start_flush_interval(Some(token)).await + } +} diff --git a/bin/router/src/schema_state.rs b/bin/router/src/schema_state.rs index 756523dc..0883dc4c 100644 --- a/bin/router/src/schema_state.rs +++ b/bin/router/src/schema_state.rs @@ -1,5 +1,6 @@ use arc_swap::{ArcSwap, Guard}; use async_trait::async_trait; +use graphql_parser::schema::Document; use graphql_tools::validation::utils::ValidationError; use hive_router_config::{supergraph::SupergraphSource, HiveRouterConfig}; use hive_router_plan_executor::{ @@ -43,6 +44,7 @@ pub struct SupergraphData { pub planner: Planner, pub authorization: AuthorizationMetadata, pub subgraph_executor_map: SubgraphExecutorMap, + pub supergraph_schema: Arc>, } #[derive(Debug, thiserror::Error)] @@ -132,6 +134,7 @@ impl SchemaState { )?; Ok(SupergraphData { + supergraph_schema: Arc::new(parsed_supergraph_sdl), metadata, planner, authorization, diff --git a/bin/router/src/shared_state.rs b/bin/router/src/shared_state.rs index 92d169fc..48027c78 100644 --- a/bin/router/src/shared_state.rs +++ b/bin/router/src/shared_state.rs @@ -1,4 +1,5 @@ use graphql_tools::validation::validate::ValidationPlan; +use hive_console_sdk::agent::UsageAgent; use hive_router_config::HiveRouterConfig; use 
hive_router_plan_executor::headers::{ compile::compile_headers_plan, errors::HeaderRuleCompileError, plan::HeaderRulesPlan, @@ -68,12 +69,14 @@ pub struct RouterSharedState { /// but no longer than `exp` date. pub jwt_claims_cache: JwtClaimsCache, pub jwt_auth_runtime: Option, + pub hive_usage_agent: Option>, } impl RouterSharedState { pub fn new( router_config: Arc, jwt_auth_runtime: Option, + hive_usage_agent: Option>, ) -> Result { Ok(Self { validation_plan: graphql_tools::validation::rules::default_rules_validation_plan(), @@ -92,6 +95,7 @@ impl RouterSharedState { ) .map_err(Box::new)?, jwt_auth_runtime, + hive_usage_agent, }) } } @@ -104,4 +108,6 @@ pub enum SharedStateError { CORSConfig(#[from] Box), #[error("invalid override labels config: {0}")] OverrideLabelsCompile(#[from] Box), + #[error("error creating hive usage agent: {0}")] + UsageAgent(#[from] Box), } diff --git a/docs/README.md b/docs/README.md index 198483d6..9154d12d 100644 --- a/docs/README.md +++ b/docs/README.md @@ -17,6 +17,7 @@ |[**query\_planner**](#query_planner)|`object`|Query planning configuration.
Default: `{"allow_expose":false,"timeout":"10s"}`
|| |[**supergraph**](#supergraph)|`object`|Configuration for the Federation supergraph source. By default, the router will use a local file-based supergraph source (`./supergraph.graphql`).
|| |[**traffic\_shaping**](#traffic_shaping)|`object`|Configuration for the traffic-shaping of the executor. Use these configurations to control how requests are being executed to subgraphs.
Default: `{"all":{"dedupe_enabled":true,"pool_idle_timeout":"50s","request_timeout":"30s"},"max_connections_per_host":100}`
|| +|[**usage\_reporting**](#usage_reporting)|`object`|Configuration for usage reporting to GraphQL Hive.
Default: `{"accept_invalid_certs":false,"access_token":null,"buffer_size":1000,"client_name_header":"graphql-client-name","client_version_header":"graphql-client-version","connect_timeout":"5s","enabled":false,"endpoint":"https://app.graphql-hive.com/usage","exclude":[],"flush_interval":"5s","request_timeout":"15s","sample_rate":"100%","target_id":null}`
|| **Additional Properties:** not allowed **Example** @@ -118,6 +119,20 @@ traffic_shaping: pool_idle_timeout: 50s request_timeout: 30s max_connections_per_host: 100 +usage_reporting: + accept_invalid_certs: false + access_token: null + buffer_size: 1000 + client_name_header: graphql-client-name + client_version_header: graphql-client-version + connect_timeout: 5s + enabled: false + endpoint: https://app.graphql-hive.com/usage + exclude: [] + flush_interval: 5s + request_timeout: 15s + sample_rate: 100% + target_id: null ``` @@ -1885,4 +1900,58 @@ Optional per-subgraph configurations that will override the default configuratio |**request\_timeout**||Optional timeout configuration for requests to subgraphs.

Example with a fixed duration:
```yaml
timeout:
duration: 5s
```

Or with a VRL expression that can return a duration based on the operation kind:
```yaml
timeout:
expression: \|
if (.request.operation.type == "mutation") {
"10s"
} else {
"15s"
}
```
|| **Additional Properties:** not allowed + +## usage\_reporting: object + +Configuration for usage reporting to GraphQL Hive. + + +**Properties** + +|Name|Type|Description|Required| +|----|----|-----------|--------| +|**accept\_invalid\_certs**|`boolean`|Accepts invalid SSL certificates
Default: false
Default: `false`
|| +|**access\_token**|`string`, `null`|Your [Registry Access Token](https://the-guild.dev/graphql/hive/docs/management/targets#registry-access-tokens) with write permission.
|| +|**buffer\_size**|`integer`|A maximum number of operations to hold in a buffer before sending to Hive Console
Default: 1000
Default: `1000`
Format: `"uint"`
Minimum: `0`
|| +|**client\_name\_header**|`string`|Default: `"graphql-client-name"`
|| +|**client\_version\_header**|`string`|Default: `"graphql-client-version"`
|| +|**connect\_timeout**|`string`|A timeout for only the connect phase of a request to Hive Console
Default: 5 seconds
Default: `"5s"`
|| +|**enabled**|`boolean`|Default: `false`
|| +|**endpoint**|`string`|For self-hosting, you can override `/usage` endpoint (defaults to `https://app.graphql-hive.com/usage`).
Default: `"https://app.graphql-hive.com/usage"`
|| +|[**exclude**](#usage_reportingexclude)|`string[]`|A list of operations (by name) to be ignored by Hive.
Default:
|| +|**flush\_interval**|`string`|Frequency of flushing the buffer to the server
Default: 5 seconds
Default: `"5s"`
|| +|**request\_timeout**|`string`|A timeout for the entire request to Hive Console
Default: 15 seconds
Default: `"15s"`
|| +|**sample\_rate**|`string`|Sample rate to determine sampling.
0% = never being sent
50% = half of the requests being sent
100% = always being sent
Default: 100%
Default: `"100%"`
|| +|**target\_id**|`string`, `null`|A target ID, this can either be a slug following the format “$organizationSlug/$projectSlug/$targetSlug” (e.g “the-guild/graphql-hive/staging”) or an UUID (e.g. “a0f4c605-6541-4350-8cfe-b31f21a4bf80”). To be used when the token is configured with an organization access token.
|| + +**Additional Properties:** not allowed +**Example** + +```yaml +accept_invalid_certs: false +access_token: null +buffer_size: 1000 +client_name_header: graphql-client-name +client_version_header: graphql-client-version +connect_timeout: 5s +enabled: false +endpoint: https://app.graphql-hive.com/usage +exclude: [] +flush_interval: 5s +request_timeout: 15s +sample_rate: 100% +target_id: null + +``` + + +### usage\_reporting\.exclude\[\]: array + +A list of operations (by name) to be ignored by Hive. +Example: ["IntrospectionQuery", "MeQuery"] + + +**Items** + +**Item Type:** `string` diff --git a/lib/executor/Cargo.toml b/lib/executor/Cargo.toml index a5d42b28..c291b46a 100644 --- a/lib/executor/Cargo.toml +++ b/lib/executor/Cargo.toml @@ -30,9 +30,9 @@ xxhash-rust = { workspace = true } tokio = { workspace = true, features = ["sync"] } dashmap = { workspace = true } vrl = { workspace = true } +regex-automata = { workspace = true } ahash = "0.8.12" -regex-automata = "0.4.10" strum = { version = "0.27.2", features = ["derive"] } ntex-http = "0.1.15" ordered-float = "4.2.0" diff --git a/lib/executor/src/execution/plan.rs b/lib/executor/src/execution/plan.rs index 22d82c40..7fb657f0 100644 --- a/lib/executor/src/execution/plan.rs +++ b/lib/executor/src/execution/plan.rs @@ -65,6 +65,7 @@ pub struct QueryPlanExecutionContext<'exec, 'req> { pub struct PlanExecutionOutput { pub body: Vec, pub headers: HeaderMap, + pub error_count: usize, } pub async fn execute_query_plan<'exec, 'req>( @@ -104,6 +105,7 @@ pub async fn execute_query_plan<'exec, 'req>( })?; let final_response = &exec_ctx.final_response; + let error_count = exec_ctx.errors.len(); // Added for usage reporting let body = project_by_operation( final_response, exec_ctx.errors, @@ -121,6 +123,7 @@ pub async fn execute_query_plan<'exec, 'req>( Ok(PlanExecutionOutput { body, headers: response_headers, + error_count, }) } diff --git a/lib/router-config/Cargo.toml b/lib/router-config/Cargo.toml index 
828a5787..089177fa 100644 --- a/lib/router-config/Cargo.toml +++ b/lib/router-config/Cargo.toml @@ -23,6 +23,7 @@ http = { workspace = true } jsonwebtoken = { workspace = true } retry-policies = { workspace = true} tracing = { workspace = true } +regex-automata = { workspace = true } schemars = "1.0.4" humantime-serde = "1.1.1" diff --git a/lib/router-config/src/env_overrides.rs b/lib/router-config/src/env_overrides.rs index f6101296..778f90a3 100644 --- a/lib/router-config/src/env_overrides.rs +++ b/lib/router-config/src/env_overrides.rs @@ -33,6 +33,10 @@ pub struct EnvVarOverrides { pub hive_console_cdn_key: Option, #[envconfig(from = "HIVE_CDN_POLL_INTERVAL")] pub hive_console_cdn_poll_interval: Option, + #[envconfig(from = "HIVE_ACCESS_TOKEN")] + pub hive_access_token: Option, + #[envconfig(from = "HIVE_TARGET")] + pub hive_target: Option, } #[derive(Debug, thiserror::Error)] @@ -99,6 +103,14 @@ impl EnvVarOverrides { } } + if let Some(hive_access_token) = self.hive_access_token.take() { + config = config.set_override("usage_reporting.access_token", hive_access_token)?; + if let Some(hive_target) = self.hive_target.take() { + config = config.set_override("usage_reporting.target_id", hive_target)?; + } + config = config.set_override("usage_reporting.enabled", true)?; + } + // GraphiQL overrides if let Some(graphiql_enabled) = self.graphiql_enabled.take() { config = config.set_override("graphiql.enabled", graphiql_enabled)?; diff --git a/lib/router-config/src/lib.rs b/lib/router-config/src/lib.rs index 105e0a72..4411a501 100644 --- a/lib/router-config/src/lib.rs +++ b/lib/router-config/src/lib.rs @@ -13,6 +13,7 @@ pub mod primitives; pub mod query_planner; pub mod supergraph; pub mod traffic_shaping; +pub mod usage_reporting; use config::{Config, File, FileFormat, FileSourceFile}; use envconfig::Envconfig; @@ -96,6 +97,9 @@ pub struct HiveRouterConfig { #[serde(default)] pub authorization: authorization::AuthorizationConfig, + /// Configuration for usage 
reporting to GraphQL Hive. +    #[serde(default)] +    pub usage_reporting: usage_reporting::UsageReportingConfig, } #[derive(Debug, thiserror::Error)] diff --git a/lib/router-config/src/usage_reporting.rs b/lib/router-config/src/usage_reporting.rs new file mode 100644 index 00000000..f39d7227 --- /dev/null +++ b/lib/router-config/src/usage_reporting.rs @@ -0,0 +1,255 @@ +use std::{fmt::Display, str::FromStr, time::Duration}; + +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone)] +#[serde(deny_unknown_fields)] +pub struct UsageReportingConfig { +    #[serde(default = "default_enabled")] +    pub enabled: bool, + +    /// Your [Registry Access Token](https://the-guild.dev/graphql/hive/docs/management/targets#registry-access-tokens) with write permission. +    pub access_token: Option<String>, + +    /// A target ID, this can either be a slug following the format “$organizationSlug/$projectSlug/$targetSlug” (e.g “the-guild/graphql-hive/staging”) or an UUID (e.g. “a0f4c605-6541-4350-8cfe-b31f21a4bf80”). To be used when the token is configured with an organization access token. +    #[serde(default, deserialize_with = "deserialize_target_id")] +    pub target_id: Option<String>, +    /// For self-hosting, you can override `/usage` endpoint (defaults to `https://app.graphql-hive.com/usage`). +    #[serde(default = "default_endpoint")] +    pub endpoint: String, + +    /// Sample rate to determine sampling. +    /// 0% = never being sent +    /// 50% = half of the requests being sent +    /// 100% = always being sent +    /// Default: 100% +    #[serde(default = "default_sample_rate")] +    #[schemars(with = "String")] +    pub sample_rate: Percentage, + +    /// A list of operations (by name) to be ignored by Hive. 
+ /// Example: ["IntrospectionQuery", "MeQuery"] + #[serde(default)] + pub exclude: Vec, + + #[serde(default = "default_client_name_header")] + pub client_name_header: String, + #[serde(default = "default_client_version_header")] + pub client_version_header: String, + + /// A maximum number of operations to hold in a buffer before sending to Hive Console + /// Default: 1000 + #[serde(default = "default_buffer_size")] + pub buffer_size: usize, + + /// Accepts invalid SSL certificates + /// Default: false + #[serde(default = "default_accept_invalid_certs")] + pub accept_invalid_certs: bool, + + /// A timeout for only the connect phase of a request to Hive Console + /// Default: 5 seconds + #[serde( + default = "default_connect_timeout", + deserialize_with = "humantime_serde::deserialize", + serialize_with = "humantime_serde::serialize" + )] + #[schemars(with = "String")] + pub connect_timeout: Duration, + + /// A timeout for the entire request to Hive Console + /// Default: 15 seconds + #[serde( + default = "default_request_timeout", + deserialize_with = "humantime_serde::deserialize", + serialize_with = "humantime_serde::serialize" + )] + #[schemars(with = "String")] + pub request_timeout: Duration, + + /// Frequency of flushing the buffer to the server + /// Default: 5 seconds + #[serde( + default = "default_flush_interval", + deserialize_with = "humantime_serde::deserialize", + serialize_with = "humantime_serde::serialize" + )] + #[schemars(with = "String")] + pub flush_interval: Duration, +} + +impl Default for UsageReportingConfig { + fn default() -> Self { + Self { + enabled: default_enabled(), + access_token: None, + target_id: None, + endpoint: default_endpoint(), + sample_rate: default_sample_rate(), + exclude: Vec::new(), + client_name_header: default_client_name_header(), + client_version_header: default_client_version_header(), + buffer_size: default_buffer_size(), + accept_invalid_certs: default_accept_invalid_certs(), + connect_timeout: 
default_connect_timeout(), + request_timeout: default_request_timeout(), + flush_interval: default_flush_interval(), + } + } +} + +fn default_enabled() -> bool { + false +} + +fn default_endpoint() -> String { + "https://app.graphql-hive.com/usage".to_string() +} + +fn default_sample_rate() -> Percentage { + Percentage::from_f64(1.0).unwrap() +} + +fn default_client_name_header() -> String { + "graphql-client-name".to_string() +} + +fn default_client_version_header() -> String { + "graphql-client-version".to_string() +} + +fn default_buffer_size() -> usize { + 1000 +} + +fn default_accept_invalid_certs() -> bool { + false +} + +fn default_request_timeout() -> Duration { + Duration::from_secs(15) +} + +fn default_connect_timeout() -> Duration { + Duration::from_secs(5) +} + +fn default_flush_interval() -> Duration { + Duration::from_secs(5) +} + +// Target ID regexp for validation: slug format +const TARGET_ID_SLUG_REGEX: &str = r"^[a-zA-Z0-9-_]+\/[a-zA-Z0-9-_]+\/[a-zA-Z0-9-_]+$"; +// Target ID regexp for validation: UUID format +const TARGET_ID_UUID_REGEX: &str = + r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"; + +fn deserialize_target_id<'de, D>(deserializer: D) -> Result, D::Error> +where + D: serde::Deserializer<'de>, +{ + let opt = Option::::deserialize(deserializer)?; + if let Some(ref s) = opt { + let trimmed_s = s.trim(); + if trimmed_s.is_empty() { + Ok(None) + } else { + let slug_regex = + regex_automata::meta::Regex::new(TARGET_ID_SLUG_REGEX).map_err(|err| { + serde::de::Error::custom(format!( + "Failed to compile target_id slug regex: {}", + err + )) + })?; + if slug_regex.is_match(trimmed_s) { + return Ok(Some(trimmed_s.to_string())); + } + let uuid_regex = + regex_automata::meta::Regex::new(TARGET_ID_UUID_REGEX).map_err(|err| { + serde::de::Error::custom(format!( + "Failed to compile target_id UUID regex: {}", + err + )) + })?; + if uuid_regex.is_match(trimmed_s) { + return Ok(Some(trimmed_s.to_string())); + } + 
Err(serde::de::Error::custom(format!( + "Invalid target_id format: '{}'. It must be either in slug format '$organizationSlug/$projectSlug/$targetSlug' or UUID format 'a0f4c605-6541-4350-8cfe-b31f21a4bf80'", + trimmed_s + ))) + } + } else { + Ok(None) + } +} + +#[derive(Debug, Clone, Copy)] +pub struct Percentage { + value: f64, +} + +impl Percentage { + pub fn from_f64(value: f64) -> Result { + if !(0.0..=1.0).contains(&value) { + return Err(format!( + "Percentage value must be between 0 and 1, got: {}", + value + )); + } + Ok(Percentage { value }) + } + pub fn as_f64(&self) -> f64 { + self.value + } +} + +impl FromStr for Percentage { + type Err = String; + + fn from_str(s: &str) -> Result { + let s_trimmed = s.trim(); + if let Some(number_part) = s_trimmed.strip_suffix('%') { + let value: f64 = number_part.parse().map_err(|err| { + format!( + "Failed to parse percentage value '{}': {}", + number_part, err + ) + })?; + Ok(Percentage::from_f64(value / 100.0)?) + } else { + Err(format!( + "Percentage value must end with '%', got: '{}'", + s_trimmed + )) + } + } +} + +impl Display for Percentage { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}%", self.value * 100.0) + } +} + +// Deserializer from `n%` string to `Percentage` struct +impl<'de> Deserialize<'de> for Percentage { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + Percentage::from_str(&s).map_err(serde::de::Error::custom) + } +} + +// Serializer from `Percentage` struct to `n%` string +impl Serialize for Percentage { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&self.to_string()) + } +}