diff --git a/.openshift-tests-extension/openshift_payload_cluster-version-operator.json b/.openshift-tests-extension/openshift_payload_cluster-version-operator.json index 6f6e9dac86..03f4a7f9b5 100644 --- a/.openshift-tests-extension/openshift_payload_cluster-version-operator.json +++ b/.openshift-tests-extension/openshift_payload_cluster-version-operator.json @@ -38,5 +38,19 @@ "source": "openshift:payload:cluster-version-operator", "lifecycle": "blocking", "environmentSelector": {} + }, + { + "name": "[Jira:\"Cluster Version Operator\"] cluster-version-operator should not install resources annotated with release.openshift.io/delete=true", + "labels": { + "42543": {}, + "Conformance": {}, + "High": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:cluster-version-operator", + "lifecycle": "blocking", + "environmentSelector": {} } ] \ No newline at end of file diff --git a/go.mod b/go.mod index 2f6f0474de..3a361a8d47 100644 --- a/go.mod +++ b/go.mod @@ -5,11 +5,11 @@ go 1.24.0 require ( github.com/blang/semver/v4 v4.0.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/go-logr/logr v1.4.2 + github.com/go-logr/logr v1.4.3 github.com/google/go-cmp v0.7.0 github.com/google/uuid v1.6.0 - github.com/onsi/ginkgo/v2 v2.21.0 - github.com/onsi/gomega v1.35.1 + github.com/onsi/ginkgo/v2 v2.22.0 + github.com/onsi/gomega v1.36.1 github.com/openshift-eng/openshift-tests-extension v0.0.0-20250220212757-b9c4d98a0c45 github.com/openshift/api v0.0.0-20260116192047-6fb7fdae95fd github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 @@ -17,21 +17,23 @@ require ( github.com/operator-framework/api v0.17.1 github.com/operator-framework/operator-lifecycle-manager v0.22.0 github.com/pkg/errors v0.9.1 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.88.1 + github.com/prometheus-operator/prometheus-operator/pkg/client v0.88.1 github.com/prometheus/client_golang v1.22.0 github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.62.0 github.com/spf13/cobra v1.9.1 golang.org/x/crypto v0.45.0 golang.org/x/net v0.47.0 - golang.org/x/time v0.9.0 + golang.org/x/time v0.13.0 gopkg.in/fsnotify.v1 v1.4.7 - k8s.io/api v0.34.1 - k8s.io/apiextensions-apiserver v0.34.1 - k8s.io/apimachinery v0.34.1 - k8s.io/client-go v0.34.1 + k8s.io/api v0.34.3 + k8s.io/apiextensions-apiserver v0.34.3 + k8s.io/apimachinery v0.34.3 + k8s.io/client-go v0.34.3 k8s.io/klog/v2 v2.130.1 k8s.io/kube-aggregator v0.34.1 - k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 ) require ( @@ -39,21 +41,30 @@ require ( github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/jsonpointer v0.22.1 // indirect + github.com/go-openapi/jsonreference v0.21.2 // indirect + github.com/go-openapi/swag v0.25.1 // indirect + github.com/go-openapi/swag/cmdutils v0.25.1 // indirect + github.com/go-openapi/swag/conv v0.25.1 // indirect + github.com/go-openapi/swag/fileutils v0.25.1 // indirect + github.com/go-openapi/swag/jsonname v0.25.1 // indirect + github.com/go-openapi/swag/jsonutils v0.25.1 
// indirect + github.com/go-openapi/swag/loading v0.25.1 // indirect + github.com/go-openapi/swag/mangling v0.25.1 // indirect + github.com/go-openapi/swag/netutils v0.25.1 // indirect + github.com/go-openapi/swag/stringutils v0.25.1 // indirect + github.com/go-openapi/swag/typeutils v0.25.1 // indirect + github.com/go-openapi/swag/yamlutils v0.25.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/cel-go v0.26.0 // indirect github.com/google/gnostic-models v0.7.0 // indirect github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -67,26 +78,26 @@ require ( github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/otel v1.35.0 // indirect go.opentelemetry.io/otel/trace v1.35.0 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/oauth2 v0.31.0 // indirect golang.org/x/sys v0.38.0 // indirect golang.org/x/term v0.37.0 // indirect golang.org/x/text v0.31.0 // indirect golang.org/x/tools v0.38.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/protobuf v1.36.5 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.34.1 // indirect - k8s.io/component-base v0.34.1 // indirect - k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect - sigs.k8s.io/controller-runtime v0.12.1 // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + k8s.io/apiserver v0.34.3 // indirect + k8s.io/component-base v0.34.3 // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect + sigs.k8s.io/controller-runtime v0.22.3 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect sigs.k8s.io/yaml v1.6.0 // indirect diff --git a/go.sum b/go.sum index a129e4c4e4..bb93be1809 100644 --- a/go.sum +++ b/go.sum @@ -9,27 +9,48 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= +github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= +github.com/go-openapi/jsonreference v0.21.2 h1:Wxjda4M/BBQllegefXrY/9aq1fxBA8sI5M/lFU6tSWU= +github.com/go-openapi/jsonreference v0.21.2/go.mod h1:pp3PEjIsJ9CZDGCNOyXIQxsNuroxm8FAJ/+quA0yKzQ= +github.com/go-openapi/swag v0.25.1 h1:6uwVsx+/OuvFVPqfQmOOPsqTcm5/GkBhNwLqIR916n8= +github.com/go-openapi/swag v0.25.1/go.mod h1:bzONdGlT0fkStgGPd3bhZf1MnuPkf2YAys6h+jZipOo= +github.com/go-openapi/swag/cmdutils v0.25.1 h1:nDke3nAFDArAa631aitksFGj2omusks88GF1VwdYqPY= +github.com/go-openapi/swag/cmdutils v0.25.1/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.1 h1:+9o8YUg6QuqqBM5X6rYL/p1dpWeZRhoIt9x7CCP+he0= +github.com/go-openapi/swag/conv v0.25.1/go.mod h1:Z1mFEGPfyIKPu0806khI3zF+/EUXde+fdeksUl2NiDs= +github.com/go-openapi/swag/fileutils v0.25.1 h1:rSRXapjQequt7kqalKXdcpIegIShhTPXx7yw0kek2uU= +github.com/go-openapi/swag/fileutils v0.25.1/go.mod h1:+NXtt5xNZZqmpIpjqcujqojGFek9/w55b3ecmOdtg8M= +github.com/go-openapi/swag/jsonname v0.25.1 h1:Sgx+qbwa4ej6AomWC6pEfXrA6uP2RkaNjA9BR8a1RJU= +github.com/go-openapi/swag/jsonname v0.25.1/go.mod h1:71Tekow6UOLBD3wS7XhdT98g5J5GR13NOTQ9/6Q11Zo= +github.com/go-openapi/swag/jsonutils v0.25.1 h1:AihLHaD0brrkJoMqEZOBNzTLnk81Kg9cWr+SPtxtgl8= +github.com/go-openapi/swag/jsonutils 
v0.25.1/go.mod h1:JpEkAjxQXpiaHmRO04N1zE4qbUEg3b7Udll7AMGTNOo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.1 h1:DSQGcdB6G0N9c/KhtpYc71PzzGEIc/fZ1no35x4/XBY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.1/go.mod h1:kjmweouyPwRUEYMSrbAidoLMGeJ5p6zdHi9BgZiqmsg= +github.com/go-openapi/swag/loading v0.25.1 h1:6OruqzjWoJyanZOim58iG2vj934TysYVptyaoXS24kw= +github.com/go-openapi/swag/loading v0.25.1/go.mod h1:xoIe2EG32NOYYbqxvXgPzne989bWvSNoWoyQVWEZicc= +github.com/go-openapi/swag/mangling v0.25.1 h1:XzILnLzhZPZNtmxKaz/2xIGPQsBsvmCjrJOWGNz/ync= +github.com/go-openapi/swag/mangling v0.25.1/go.mod h1:CdiMQ6pnfAgyQGSOIYnZkXvqhnnwOn997uXZMAd/7mQ= +github.com/go-openapi/swag/netutils v0.25.1 h1:2wFLYahe40tDUHfKT1GRC4rfa5T1B4GWZ+msEFA4Fl4= +github.com/go-openapi/swag/netutils v0.25.1/go.mod h1:CAkkvqnUJX8NV96tNhEQvKz8SQo2KF0f7LleiJwIeRE= +github.com/go-openapi/swag/stringutils v0.25.1 h1:Xasqgjvk30eUe8VKdmyzKtjkVjeiXx1Iz0zDfMNpPbw= +github.com/go-openapi/swag/stringutils v0.25.1/go.mod h1:JLdSAq5169HaiDUbTvArA2yQxmgn4D6h4A+4HqVvAYg= +github.com/go-openapi/swag/typeutils v0.25.1 h1:rD/9HsEQieewNt6/k+JBwkxuAHktFtH3I3ysiFZqukA= +github.com/go-openapi/swag/typeutils v0.25.1/go.mod h1:9McMC/oCdS4BKwk2shEB7x17P6HmMmA6dQRtAkSnNb8= +github.com/go-openapi/swag/yamlutils v0.25.1 h1:mry5ez8joJwzvMbaTGLhw8pXUnhDK91oSJLDPF1bmGk= +github.com/go-openapi/swag/yamlutils v0.25.1/go.mod h1:cm9ywbzncy3y6uPm/97ysW8+wZ09qsks+9RS8fLWKqg= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -47,8 +68,6 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -57,17 +76,12 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -78,12 +92,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/openshift-eng/openshift-tests-extension v0.0.0-20250220212757-b9c4d98a0c45 h1:hXpbYtP3iTh8oy/RKwKkcMziwchY3fIk95ciczf7cOA= github.com/openshift-eng/openshift-tests-extension v0.0.0-20250220212757-b9c4d98a0c45/go.mod h1:6gkP5f2HL0meusT0Aim8icAspcD1cG055xxBZ9yC68M= github.com/openshift/api v0.0.0-20260116192047-6fb7fdae95fd h1:Hpjq/55Qb0Gy65RewTSWWlyxpSovaRF86EPjYQkdlRU= @@ -103,6 +113,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.88.1 h1:K/r+qPGyr/Fx9vbN7biV9q2/PV5ETj+bVVH5RUvqEG8= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.88.1/go.mod h1:IJwk1oNs212afqGbNnE84GAB95OHtJR/BuI1rKESiYk= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.88.1 h1:IPgsLDrjxouq6JzdVpenq/20dbG8iu1OCd65dWnHNpE= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.88.1/go.mod h1:HdPo2CAQtRuffDdy8yVsHA4XmUJKmAGGzVRgT6TYsgc= github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -146,8 +160,8 @@ go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= 
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -165,8 +179,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= +golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -182,8 +196,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= +golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -198,48 +212,46 @@ google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1: google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod 
h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= -k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= -k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= -k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= -k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= -k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA= -k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0= -k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= -k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= -k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A= -k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0= +k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4= +k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk= +k8s.io/apiextensions-apiserver v0.34.3 h1:p10fGlkDY09eWKOTeUSioxwLukJnm+KuDZdrW71y40g= +k8s.io/apiextensions-apiserver v0.34.3/go.mod h1:aujxvqGFRdb/cmXYfcRTeppN7S2XV/t7WMEc64zB5A0= +k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE= +k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.3 h1:uGH1qpDvSiYG4HVFqc6A3L4CKiX+aBWDrrsxHYK0Bdo= +k8s.io/apiserver v0.34.3/go.mod h1:QPnnahMO5C2m3lm6fPW3+JmyQbvHZQ8uudAu/493P2w= +k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A= +k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM= +k8s.io/component-base v0.34.3 h1:zsEgw6ELqK0XncCQomgO9DpUIzlrYuZYA0Cgo+JWpVk= +k8s.io/component-base v0.34.3/go.mod 
h1:5iIlD8wPfWE/xSHTRfbjuvUul2WZbI2nOUK65XL0E/c= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-aggregator v0.34.1 h1:WNLV0dVNoFKmuyvdWLd92iDSyD/TSTjqwaPj0U9XAEU= k8s.io/kube-aggregator v0.34.1/go.mod h1:RU8j+5ERfp0h+gIvWtxRPfsa5nK7rboDm8RST8BJfYQ= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.12.1 h1:4BJY01xe9zKQti8oRjj/NeHKRXthf1YkYJAgLONFFoI= -sigs.k8s.io/controller-runtime v0.12.1/go.mod h1:BKhxlA4l7FPK4AQcsuL4X6vZeWnKDXez/vp1Y8dxTU0= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y= +sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= diff --git a/hack/generate-lib-resources.py b/hack/generate-lib-resources.py index 8f971db10f..6b870a7002 100755 --- a/hack/generate-lib-resources.py +++ b/hack/generate-lib-resources.py @@ -283,6 +283,7 @@ def scheme_group_versions(types): 'github.com/openshift/api/image/v1': {'ImageStream'}, # for payload loading 'github.com/openshift/api/security/v1': {'SecurityContextConstraints'}, 'github.com/operator-framework/api/pkg/operators/v1': {'OperatorGroup'}, + 'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1': {'PrometheusRule', 'ServiceMonitor'}, 'k8s.io/api/admissionregistration/v1': {'ValidatingWebhookConfiguration'}, 'k8s.io/api/apps/v1': {'DaemonSet', 'Deployment'}, 'k8s.io/api/batch/v1': {'CronJob', 'Job'}, diff --git a/lib/resourceread/resourceread.go b/lib/resourceread/resourceread.go index 5e14135f0c..0b227d0264 100644 --- a/lib/resourceread/resourceread.go +++ b/lib/resourceread/resourceread.go @@ -7,6 +7,7 @@ import ( imagev1 "github.com/openshift/api/image/v1" securityv1 "github.com/openshift/api/security/v1" operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" appsv1 "k8s.io/api/apps/v1" batchv1 
"k8s.io/api/batch/v1" @@ -42,6 +43,9 @@ func init() { if err := imagev1.AddToScheme(scheme); err != nil { panic(err) } + if err := monitoringv1.AddToScheme(scheme); err != nil { + panic(err) + } if err := operatorsv1.AddToScheme(scheme); err != nil { panic(err) } @@ -58,6 +62,7 @@ func init() { batchv1.SchemeGroupVersion, corev1.SchemeGroupVersion, imagev1.SchemeGroupVersion, + monitoringv1.SchemeGroupVersion, operatorsv1.SchemeGroupVersion, rbacv1.SchemeGroupVersion, securityv1.SchemeGroupVersion, diff --git a/test/cvo/cvo.go b/test/cvo/cvo.go index 230383e807..322210a6b7 100644 --- a/test/cvo/cvo.go +++ b/test/cvo/cvo.go @@ -1,7 +1,12 @@ package cvo import ( + "bytes" "context" + "fmt" + "os" + "path/filepath" + "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -9,10 +14,11 @@ import ( g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" - "github.com/openshift/cluster-version-operator/test/oc" ocapi "github.com/openshift/cluster-version-operator/test/oc/api" "github.com/openshift/cluster-version-operator/test/util" + "github.com/openshift/library-go/pkg/manifest" + apierrors "k8s.io/apimachinery/pkg/api/errors" ) var logger = g.GinkgoLogr.WithName("cluster-version-operator-tests") @@ -84,4 +90,54 @@ var _ = g.Describe(`[Jira:"Cluster Version Operator"] cluster-version-operator`, sccAnnotation := cvoPod.Annotations["openshift.io/scc"] o.Expect(sccAnnotation).To(o.Equal("hostaccess"), "Expected the annotation 'openshift.io/scc annotation' on pod %s to have the value 'hostaccess', but got %s", cvoPod.Name, sccAnnotation) }) + + g.It(`should not install resources annotated with release.openshift.io/delete=true`, g.Label("Conformance", "High", "42543"), func() { + ctx := context.Background() + err := util.SkipIfHypershift(ctx, restCfg) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to determine if cluster is HyperShift") + err = util.SkipIfMicroshift(ctx, restCfg) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to determine if cluster is MicroShift") + + // Initialize the ocapi.OC instance + g.By("Setting up oc") + ocClient, err := oc.NewOC(ocapi.Options{Logger: logger, Timeout: 90 * time.Second}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Extracting manifests in the release") + annotation := "release.openshift.io/delete" + tempDir, err := os.MkdirTemp("", "OTA-42543-manifest-") + o.Expect(err).NotTo(o.HaveOccurred(), "create temp manifest dir failed") + manifestDir := ocapi.ReleaseExtractOptions{To: tempDir} + logger.Info(fmt.Sprintf("Extract manifests to: %s", manifestDir.To)) + defer func() { _ = os.RemoveAll(manifestDir.To) }() + err = ocClient.AdmReleaseExtract(manifestDir) + o.Expect(err).NotTo(o.HaveOccurred(), "extracting manifests failed") + + files, err := os.ReadDir(manifestDir.To) + o.Expect(err).NotTo(o.HaveOccurred()) + g.By(fmt.Sprintf("Checking if getting manifests with %s on the cluster led to not-found error", annotation)) + for _, manifestFile := range files { + filePath := filepath.Join(manifestDir.To, manifestFile.Name()) + raw, err := os.ReadFile(filePath) + if err != nil { + o.Expect(err).NotTo(o.HaveOccurred(), "failed to read manifest file") + } + manifests, err := manifest.ParseManifests(bytes.NewReader(raw)) + if err != nil { + // files like release-metadata are not manifest file, so skip them + logger.Error(err, "failed to parse manifest file: "+filePath) + continue + } + + for _, ms := range manifests { + ann := ms.Obj.GetAnnotations() + if ann == nil || ann[annotation] != "true" { + continue + } + manifestFilePath 
:= filepath.Join(manifestDir.To, manifestFile.Name()) + err := util.GetManifestExpectNotFoundError(ms) + o.Expect(apierrors.IsNotFound(err)).To(o.BeTrue(), fmt.Sprintf("The deleted manifest should not be installed, but it is actually installed: %s, manifest: %v, error: %v", manifestFilePath, string(ms.Raw), err)) + } + } + }) }) diff --git a/test/util/connection.go b/test/util/connection.go new file mode 100644 index 0000000000..b451e14d35 --- /dev/null +++ b/test/util/connection.go @@ -0,0 +1,96 @@ +package util + +import ( + "fmt" + + imageclientv1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + securityclientv1 "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1" + operatorsclientv1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1" + monitoringclientv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1" + apiextclientv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + admissionregistrationclientv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" + appsclientv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + batchclientv1 "k8s.io/client-go/kubernetes/typed/batch/v1" + coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1" + rbacclientv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" +) + +func getImageClient() (*imageclientv1.ImageV1Client, error) { + config, err := GetRestConfig() + if err != nil { + return nil, fmt.Errorf("unable to load build config: %w", err) + } + return imageclientv1.NewForConfigOrDie(config), nil +} + +func getSecurityClient() (*securityclientv1.SecurityV1Client, error) { + config, err := GetRestConfig() + if err != nil { + return nil, fmt.Errorf("unable to load build config: %w", err) + } + return securityclientv1.NewForConfigOrDie(config), nil +} + +func getOperatorClient() (*operatorsclientv1.OperatorsV1Client, error) { + config, err := GetRestConfig() + if err != nil { + return nil, fmt.Errorf("unable to load build config: %w", err) + } + return operatorsclientv1.NewForConfigOrDie(config), nil +} + +func getMonitoringClient() (*monitoringclientv1.MonitoringV1Client, error) { + config, err := GetRestConfig() + if err != nil { + return nil, fmt.Errorf("unable to load build config: %w", err) + } + return monitoringclientv1.NewForConfigOrDie(config), nil +} + +func getAdmissionRegistrationClient() (*admissionregistrationclientv1.AdmissionregistrationV1Client, error) { + config, err := GetRestConfig() + if err != nil { + return nil, fmt.Errorf("unable to load build config: %w", err) + } + return admissionregistrationclientv1.NewForConfigOrDie(config), nil +} + +func getAppsClient() (*appsclientv1.AppsV1Client, error) { + config, err := GetRestConfig() + if err != nil { + return nil, fmt.Errorf("unable to load build config: %w", err) + } + return appsclientv1.NewForConfigOrDie(config), nil +} + +func getBatchClient() (*batchclientv1.BatchV1Client, error) { + config, err := GetRestConfig() + if err != nil { + return nil, fmt.Errorf("unable to load build config: %w", err) + } + return batchclientv1.NewForConfigOrDie(config), nil +} + +func getCoreV1Client() (*coreclientv1.CoreV1Client, error) { + config, err := GetRestConfig() + if err != nil { + return nil, fmt.Errorf("unable to load build config: %w", err) + } + return coreclientv1.NewForConfigOrDie(config), nil +} + +func getRbacV1Client() (*rbacclientv1.RbacV1Client, error) { + config, err := GetRestConfig() + if
err != nil { + return nil, fmt.Errorf("unable to load build config: %w", err) + } + return rbacclientv1.NewForConfigOrDie(config), nil +} + +func getApiextV1Client() (*apiextclientv1.ApiextensionsV1Client, error) { + config, err := GetRestConfig() + if err != nil { + return nil, fmt.Errorf("unable to load build config: %w", err) + } + return apiextclientv1.NewForConfigOrDie(config), nil +} diff --git a/test/util/util.go b/test/util/util.go index 4d7319a766..3ac0adb196 100644 --- a/test/util/util.go +++ b/test/util/util.go @@ -2,8 +2,21 @@ package util import ( "context" + "fmt" "time" + imagev1 "github.com/openshift/api/image/v1" + securityv1 "github.com/openshift/api/security/v1" + "github.com/openshift/cluster-version-operator/lib/resourceread" + "github.com/openshift/library-go/pkg/manifest" + operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + g "github.com/onsi/ginkgo/v2" configv1 "github.com/openshift/api/config/v1" clientconfigv1 "github.com/openshift/client-go/config/clientset/versioned" @@ -16,6 +29,158 @@ import ( "k8s.io/client-go/tools/clientcmd" ) +// GetManifestExpectNotFoundError gets the manifest from the cluster so the caller can check whether it is installed +func GetManifestExpectNotFoundError(ms manifest.Manifest) error { + obj, err := resourceread.Read(ms.Raw) + if err != nil { + return err + } + switch typedObject := obj.(type) { + case *imagev1.ImageStream: + client, err := getImageClient() + if err != nil { + return err + } + _, err = client.ImageStreams(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *securityv1.SecurityContextConstraints: + client, err := getSecurityClient() + if err != nil { + return err + } + _, err = client.SecurityContextConstraints().Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *operatorsv1.OperatorGroup: + client, err := getOperatorClient() + if err != nil { + return err + } + _, err = client.OperatorGroups(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *monitoringv1.PrometheusRule: + client, err := getMonitoringClient() + if err != nil { + return err + } + _, err = client.PrometheusRules(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *monitoringv1.ServiceMonitor: + client, err := getMonitoringClient() + if err != nil { + return err + } + _, err = client.ServiceMonitors(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *admissionregistrationv1.ValidatingWebhookConfiguration: + client, err := getAdmissionRegistrationClient() + if err != nil { + return err + } + _, err = client.ValidatingWebhookConfigurations().Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *appsv1.DaemonSet: + client, err := getAppsClient() + if err != nil { + return err + } + _, err = client.DaemonSets(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *appsv1.Deployment: + client, err := getAppsClient() + if err != nil { + return err + } + _, err = client.Deployments(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return
err + case *batchv1.CronJob: + client, err := getBatchClient() + if err != nil { + return err + } + _, err = client.CronJobs(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *batchv1.Job: + client, err := getBatchClient() + if err != nil { + return err + } + _, err = client.Jobs(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *corev1.ConfigMap: + client, err := getCoreV1Client() + if err != nil { + return err + } + _, err = client.ConfigMaps(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *corev1.Namespace: + client, err := getCoreV1Client() + if err != nil { + return err + } + _, err = client.Namespaces().Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *corev1.Secret: + client, err := getCoreV1Client() + if err != nil { + return err + } + _, err = client.Secrets(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *corev1.Service: + client, err := getCoreV1Client() + if err != nil { + return err + } + _, err = client.Services(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *corev1.ServiceAccount: + client, err := getCoreV1Client() + if err != nil { + return err + } + _, err = client.ServiceAccounts(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *rbacv1.ClusterRole: + client, err := getRbacV1Client() + if err != nil { + return err + } + _, err = client.ClusterRoles().Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *rbacv1.ClusterRoleBinding: + client, err := getRbacV1Client() + if err != nil { + return err + } + _, err = client.ClusterRoleBindings().Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *rbacv1.Role: + client, err := getRbacV1Client() + if err != nil { + return err + } + _, err = client.Roles(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *rbacv1.RoleBinding: + client, err := getRbacV1Client() + if err != nil { + return err + } + _, err = client.RoleBindings(typedObject.Namespace).Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + case *apiextensionsv1.CustomResourceDefinition: + client, err := getApiextV1Client() + if err != nil { + return err + } + _, err = client.CustomResourceDefinitions().Get(context.TODO(), typedObject.Name, metav1.GetOptions{}) + return err + default: + return fmt.Errorf("unrecognized manifest type: %T", obj) + } +} + // IsHypershift checks if running on a HyperShift hosted cluster // Refer to https://github.com/openshift/origin/blob/31704414237b8bd5c66ad247c105c94abc9470b1/test/extended/util/framework.go#L2301 func IsHypershift(ctx context.Context, restConfig *rest.Config) (bool, error) { diff --git a/vendor/github.com/emicklei/go-restful/v3/.travis.yml b/vendor/github.com/emicklei/go-restful/v3/.travis.yml deleted file mode 100644 index 3a0bf5ff1b..0000000000 --- a/vendor/github.com/emicklei/go-restful/v3/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.x - -before_install: - - go test -v - -script: - - go test -race -coverprofile=coverage.txt -covermode=atomic - -after_success: - - bash <(curl -s https://codecov.io/bash) \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md 
b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 6f24dfff56..4fcd920abe 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,9 @@ # Change history of go-restful +## [v3.13.0] - 2025-08-14 + +- optimize performance of path matching in CurlyRouter ( thanks @wenhuang, Wen Huang) + ## [v3.12.2] - 2025-02-21 - allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt) diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 3fb40d1980..50a79ab692 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -84,6 +84,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo - Configurable (trace) logging - Customizable gzip/deflate readers and writers using CompressorProvider registration - Inject your own http.Handler using the `HttpMiddlewareHandlerToFilter` function +- Added `SetPathTokenCacheEnabled` and `SetCustomVerbCacheEnabled` to disable regexp caching (default=true) ## How to customize There are several hooks to customize the behavior of the go-restful package. diff --git a/vendor/github.com/emicklei/go-restful/v3/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go index 6fd2bcd5a1..eec43bfd06 100644 --- a/vendor/github.com/emicklei/go-restful/v3/curly.go +++ b/vendor/github.com/emicklei/go-restful/v3/curly.go @@ -9,11 +9,35 @@ import ( "regexp" "sort" "strings" + "sync" ) // CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets. type CurlyRouter struct{} +var ( + regexCache sync.Map // Cache for compiled regex patterns + pathTokenCacheEnabled = true // Enable/disable path token regex caching +) + +// SetPathTokenCacheEnabled enables or disables path token regex caching for CurlyRouter. +// When disabled, regex patterns will be compiled on every request. +// When enabled (default), compiled regex patterns are cached for better performance. +func SetPathTokenCacheEnabled(enabled bool) { + pathTokenCacheEnabled = enabled +} + +// getCachedRegexp retrieves a compiled regex from the cache if found and valid. +// Returns the regex and true if found and valid, nil and false otherwise. +func getCachedRegexp(cache *sync.Map, pattern string) (*regexp.Regexp, bool) { + if cached, found := cache.Load(pattern); found { + if regex, ok := cached.(*regexp.Regexp); ok { + return regex, true + } + } + return nil, false +} + // SelectRoute is part of the Router interface and returns the best match // for the WebService and its Route for the given Request. 
func (c CurlyRouter) SelectRoute( @@ -113,8 +137,28 @@ func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, reque } return true, true } - matched, err := regexp.MatchString(regPart, requestToken) - return (matched && err == nil), false + + // Check cache first (if enabled) + if pathTokenCacheEnabled { + if regex, found := getCachedRegexp(®exCache, regPart); found { + matched := regex.MatchString(requestToken) + return matched, false + } + } + + // Compile the regex + regex, err := regexp.Compile(regPart) + if err != nil { + return false, false + } + + // Cache the regex (if enabled) + if pathTokenCacheEnabled { + regexCache.Store(regPart, regex) + } + + matched := regex.MatchString(requestToken) + return matched, false } var jsr311Router = RouterJSR311{} @@ -168,7 +212,7 @@ func (c CurlyRouter) computeWebserviceScore(requestTokens []string, routeTokens if matchesToken { score++ // extra score for regex match } - } + } } else { // not a parameter if eachRequestToken != eachRouteToken { diff --git a/vendor/github.com/emicklei/go-restful/v3/custom_verb.go b/vendor/github.com/emicklei/go-restful/v3/custom_verb.go index bfc17efde8..0b98eeb091 100644 --- a/vendor/github.com/emicklei/go-restful/v3/custom_verb.go +++ b/vendor/github.com/emicklei/go-restful/v3/custom_verb.go @@ -1,14 +1,28 @@ package restful +// Copyright 2025 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + import ( "fmt" "regexp" + "sync" ) var ( - customVerbReg = regexp.MustCompile(":([A-Za-z]+)$") + customVerbReg = regexp.MustCompile(":([A-Za-z]+)$") + customVerbCache sync.Map // Cache for compiled custom verb regexes + customVerbCacheEnabled = true // Enable/disable custom verb regex caching ) +// SetCustomVerbCacheEnabled enables or disables custom verb regex caching. +// When disabled, custom verb regex patterns will be compiled on every request. +// When enabled (default), compiled custom verb regex patterns are cached for better performance. +func SetCustomVerbCacheEnabled(enabled bool) { + customVerbCacheEnabled = enabled +} + func hasCustomVerb(routeToken string) bool { return customVerbReg.MatchString(routeToken) } @@ -20,7 +34,23 @@ func isMatchCustomVerb(routeToken string, pathToken string) bool { } customVerb := rs[1] - specificVerbReg := regexp.MustCompile(fmt.Sprintf(":%s$", customVerb)) + regexPattern := fmt.Sprintf(":%s$", customVerb) + + // Check cache first (if enabled) + if customVerbCacheEnabled { + if specificVerbReg, found := getCachedRegexp(&customVerbCache, regexPattern); found { + return specificVerbReg.MatchString(pathToken) + } + } + + // Compile the regex + specificVerbReg := regexp.MustCompile(regexPattern) + + // Cache the regex (if enabled) + if customVerbCacheEnabled { + customVerbCache.Store(regexPattern, specificVerbReg) + } + return specificVerbReg.MatchString(pathToken) } diff --git a/vendor/github.com/emicklei/go-restful/v3/doc.go b/vendor/github.com/emicklei/go-restful/v3/doc.go index 69b13057d0..80809225b8 100644 --- a/vendor/github.com/emicklei/go-restful/v3/doc.go +++ b/vendor/github.com/emicklei/go-restful/v3/doc.go @@ -1,7 +1,7 @@ /* Package restful , a lean package for creating REST-style WebServices without magic. -WebServices and Routes +### WebServices and Routes A WebService has a collection of Route objects that dispatch incoming Http Requests to a function calls. Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes. 
@@ -30,14 +30,14 @@ The (*Request, *Response) arguments provide functions for reading information fr See the example https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go with a full implementation. -Regular expression matching Routes +### Regular expression matching Routes A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path. For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters. Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax) This feature requires the use of a CurlyRouter. -Containers +### Containers A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests. Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container. @@ -47,7 +47,7 @@ You can create your own Container and create a new http.Server for that particul container := restful.NewContainer() server := &http.Server{Addr: ":8081", Handler: container} -Filters +### Filters A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses. You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc. @@ -60,22 +60,21 @@ Use the following statement to pass the request,response pair to the next filter chain.ProcessFilter(req, resp) -Container Filters +### Container Filters These are processed before any registered WebService. // install a (global) filter for the default container (processed before any webservice) restful.Filter(globalLogging) -WebService Filters +### WebService Filters These are processed before any Route of a WebService. // install a webservice filter (processed before any route) ws.Filter(webserviceLogging).Filter(measureTime) - -Route Filters +### Route Filters These are processed before calling the function associated with the Route. @@ -84,7 +83,7 @@ These are processed before calling the function associated with the Route. See the example https://github.com/emicklei/go-restful/blob/v3/examples/filters/restful-filters.go with full implementations. -Response Encoding +### Response Encoding Two encodings are supported: gzip and deflate. To enable this for all responses: @@ -95,20 +94,20 @@ Alternatively, you can create a Filter that performs the encoding and install it See the example https://github.com/emicklei/go-restful/blob/v3/examples/encoding/restful-encoding-filter.go -OPTIONS support +### OPTIONS support By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request. Filter(OPTIONSFilter()) -CORS +### CORS By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests. cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer} Filter(cors.Filter) -Error Handling +### Error Handling Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why. For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation. 
@@ -137,11 +136,11 @@ The request does not have or has an unknown Accept Header set for this operation The request does not have or has an unknown Content-Type Header set for this operation. -ServiceError +### ServiceError In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response. -Performance options +### Performance options This package has several options that affect the performance of your service. It is important to understand them and how you can change it. @@ -156,30 +155,27 @@ Default value is true If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool. Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation. -Trouble shooting +### Trouble shooting This package has the means to produce detail logging of the complete Http request matching process and filter invocation. Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as: restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile)) -Logging +### Logging The restful.SetLogger() method allows you to override the logger used by the package. By default restful uses the standard library `log` package and logs to stdout. Different logging packages are supported as long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your preferred package is simple. -Resources +### Resources -[project]: https://github.com/emicklei/go-restful +(c) 2012-2025, http://ernestmicklei.com. MIT License +[project]: https://github.com/emicklei/go-restful [examples]: https://github.com/emicklei/go-restful/blob/master/examples - -[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/ - +[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/ [showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape - -(c) 2012-2015, http://ernestmicklei.com. MIT License */ package restful diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml index 0cffafa7bf..0ed62c1a18 100644 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -1,26 +1,28 @@ +version: "2" + run: timeout: 1m tests: true linters: - disable-all: true - enable: + default: none + enable: # please keep this alphabetized + - asasalint - asciicheck + - copyloopvar + - dupl - errcheck - forcetypeassert + - goconst - gocritic - - gofmt - - goimports - - gosimple - govet - ineffassign - misspell + - musttag - revive - staticcheck - - typecheck - unused issues: - exclude-use-default: false max-issues-per-linter: 0 max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index 30568e768d..b22c57d713 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -77,7 +77,7 @@ func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { write: fn, } // For skipping fnlogger.Info and fnlogger.Error. 
- l.Formatter.AddCallDepth(1) + l.AddCallDepth(1) // via Formatter return l } @@ -164,17 +164,17 @@ type fnlogger struct { } func (l fnlogger) WithName(name string) logr.LogSink { - l.Formatter.AddName(name) + l.AddName(name) // via Formatter return &l } func (l fnlogger) WithValues(kvList ...any) logr.LogSink { - l.Formatter.AddValues(kvList) + l.AddValues(kvList) // via Formatter return &l } func (l fnlogger) WithCallDepth(depth int) logr.LogSink { - l.Formatter.AddCallDepth(depth) + l.AddCallDepth(depth) // via Formatter return &l } diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml index 22f8d21cca..7cea1af8b5 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -1,61 +1,75 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 3 - +version: "2" linters: - enable-all: true + default: all disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert - funlen - - godox + - gochecknoglobals + - gochecknoinits - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - godot - - gofumpt + - godox + - gosmopolitan + - inamedparam + #- intrange # disabled while < go1.22 + - ireturn + - lll + - musttag + - nestif + - nlreturn + - nonamedreturns + - noinlineerr - paralleltest - - tparallel + - recvcheck + - testpackage - thelper - - ifshort - - exhaustruct + - tparallel + - unparam - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md index 0108f1d572..45bd31b14f 100644 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -13,7 +13,14 @@ Completed YES Tested YES ## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + + + +also known as [RFC6901](https://www.rfc-editor.org/rfc/rfc6901) ### Note + The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. 
+ +That is because our implementation of the JSON pointer only supports explicit references to array elements: the provision in the spec +to resolve non-existent members as "the last element in the array", using the special trailing character "-". diff --git a/vendor/github.com/go-openapi/jsonpointer/errors.go b/vendor/github.com/go-openapi/jsonpointer/errors.go new file mode 100644 index 0000000000..b84343d9d7 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/errors.go @@ -0,0 +1,18 @@ +package jsonpointer + +type pointerError string + +func (e pointerError) Error() string { + return string(e) +} + +const ( + // ErrPointer is an error raised by the jsonpointer package + ErrPointer pointerError = "JSON pointer error" + + // ErrInvalidStart states that a JSON pointer must start with a separator ("/") + ErrInvalidStart pointerError = `JSON pointer must be empty or start with a "` + pointerSeparator + + // ErrUnsupportedValueType indicates that a value of the wrong type is being set + ErrUnsupportedValueType pointerError = "only structs, pointers, maps and slices are supported for setting values" +) diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index d970c7cf44..7513c4763b 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -33,19 +33,18 @@ import ( "strconv" "strings" - "github.com/go-openapi/swag" + "github.com/go-openapi/swag/jsonname" ) const ( emptyPointer = `` pointerSeparator = `/` - - invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator - notFound = `Can't find the pointer in the document` ) -var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() -var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() +var ( + jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() + jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() +) // JSONPointable is an interface for structs to implement when they need to customize the // json pointer process @@ -59,181 +58,104 @@ type JSONSetable interface { JSONSet(string, any) error } -// New creates a new json pointer for the given string -func New(jsonPointerString string) (Pointer, error) { - - var p Pointer - err := p.parse(jsonPointerString) - return p, err - -} - -// Pointer the json pointer reprsentation +// Pointer is a representation of a json pointer type Pointer struct { referenceTokens []string } -// "Constructor", parses the given string JSON pointer -func (p *Pointer) parse(jsonPointerString string) error { - - var err error - - if jsonPointerString != emptyPointer { - if !strings.HasPrefix(jsonPointerString, pointerSeparator) { - err = errors.New(invalidStart) - } else { - referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...) 
- } - } +// New creates a new json pointer for the given string +func New(jsonPointerString string) (Pointer, error) { + var p Pointer + err := p.parse(jsonPointerString) - return err + return p, err } // Get uses the pointer to retrieve a value from a JSON document func (p *Pointer) Get(document any) (any, reflect.Kind, error) { - return p.get(document, swag.DefaultJSONNameProvider) + return p.get(document, jsonname.DefaultJSONNameProvider) } // Set uses the pointer to set a value from a JSON document func (p *Pointer) Set(document any, value any) (any, error) { - return document, p.set(document, value, swag.DefaultJSONNameProvider) -} - -// GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) { - return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) -} - -// SetForToken gets a value for a json pointer token 1 level deep -func SetForToken(document any, decodedToken string, value any) (any, error) { - return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) + return document, p.set(document, value, jsonname.DefaultJSONNameProvider) } -func isNil(input any) bool { - if input == nil { - return true +// DecodedTokens returns the decoded tokens of this JSON pointer +func (p *Pointer) DecodedTokens() []string { + result := make([]string, 0, len(p.referenceTokens)) + for _, t := range p.referenceTokens { + result = append(result, Unescape(t)) } + return result +} - kind := reflect.TypeOf(input).Kind() - switch kind { //nolint:exhaustive - case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: - return reflect.ValueOf(input).IsNil() - default: - return false - } +// IsEmpty returns true if this is an empty json pointer +// this indicates that it points to the root document +func (p *Pointer) IsEmpty() bool { + return len(p.referenceTokens) == 0 } -func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { - rValue := reflect.Indirect(reflect.ValueOf(node)) - kind := rValue.Kind() - if isNil(node) { - return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken) - } +// Pointer to string representation function +func (p *Pointer) String() string { - switch typed := node.(type) { - case JSONPointable: - r, err := typed.JSONLookup(decodedToken) - if err != nil { - return nil, kind, err - } - return r, kind, nil - case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect - return getSingleImpl(*typed, decodedToken, nameProvider) + if len(p.referenceTokens) == 0 { + return emptyPointer } - switch kind { //nolint:exhaustive - case reflect.Struct: - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return nil, kind, fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - return fld.Interface(), kind, nil - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - mv := rValue.MapIndex(kv) - - if mv.IsValid() { - return mv.Interface(), kind, nil - } - return nil, kind, fmt.Errorf("object has no key %q", decodedToken) + return pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) +} - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) +func (p *Pointer) Offset(document string) (int64, error) { + dec := json.NewDecoder(strings.NewReader(document)) + var offset int64 + for _, ttk := range p.DecodedTokens() { + tk, err := dec.Token() if err != nil { - return nil, kind, 
err + return 0, err } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex) + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + offset, err = offsetSingleObject(dec, ttk) + if err != nil { + return 0, err + } + case '[': + offset, err = offsetSingleArray(dec, ttk) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer) + } + default: + return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer) } - - elem := rValue.Index(tokenIndex) - return elem.Interface(), kind, nil - - default: - return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) } - + return offset, nil } -func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error { - rValue := reflect.Indirect(reflect.ValueOf(node)) - - if ns, ok := node.(JSONSetable); ok { // pointer impl - return ns.JSONSet(decodedToken, data) - } - - if rValue.Type().Implements(jsonSetableType) { - return node.(JSONSetable).JSONSet(decodedToken, data) - } - - switch rValue.Kind() { //nolint:exhaustive - case reflect.Struct: - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - if fld.IsValid() { - fld.Set(reflect.ValueOf(data)) - } - return nil - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - rValue.SetMapIndex(kv, reflect.ValueOf(data)) - return nil - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) - } +// "Constructor", parses the given string JSON pointer +func (p *Pointer) parse(jsonPointerString string) error { + var err error - elem := rValue.Index(tokenIndex) - if !elem.CanSet() { - return fmt.Errorf("can't set slice index %s to %v", decodedToken, data) + if jsonPointerString != emptyPointer { + if !strings.HasPrefix(jsonPointerString, pointerSeparator) { + err = errors.Join(ErrInvalidStart, ErrPointer) + } else { + referenceTokens := strings.Split(jsonPointerString, pointerSeparator) + p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...) 
} - elem.Set(reflect.ValueOf(data)) - return nil - - default: - return fmt.Errorf("invalid token reference %q", decodedToken) } + return err } -func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { - +func (p *Pointer) get(node any, nameProvider *jsonname.NameProvider) (any, reflect.Kind, error) { if nameProvider == nil { - nameProvider = swag.DefaultJSONNameProvider + nameProvider = jsonname.DefaultJSONNameProvider } kind := reflect.Invalid @@ -244,7 +166,6 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K } for _, token := range p.referenceTokens { - decodedToken := Unescape(token) r, knd, err := getSingleImpl(node, decodedToken, nameProvider) @@ -260,15 +181,18 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K return node, kind, nil } -func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { +func (p *Pointer) set(node, data any, nameProvider *jsonname.NameProvider) error { knd := reflect.ValueOf(node).Kind() - if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { - return errors.New("only structs, pointers, maps and slices are supported for setting values") + if knd != reflect.Pointer && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { + return errors.Join( + ErrUnsupportedValueType, + ErrPointer, + ) } if nameProvider == nil { - nameProvider = swag.DefaultJSONNameProvider + nameProvider = jsonname.DefaultJSONNameProvider } // Full document when empty @@ -286,6 +210,11 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { return setSingleImpl(node, data, decodedToken, nameProvider) } + // Check for nil during traversal + if isNil(node) { + return fmt.Errorf("cannot traverse through nil value at %q: %w", decodedToken, ErrPointer) + } + rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() @@ -295,7 +224,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { return err } fld := reflect.ValueOf(r) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Pointer { node = fld.Addr().Interface() continue } @@ -307,10 +236,10 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { - return fmt.Errorf("object has no field %q", decodedToken) + return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) } fld := rValue.FieldByName(nm) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Pointer { node = fld.Addr().Interface() continue } @@ -321,9 +250,9 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { mv := rValue.MapIndex(kv) if !mv.IsValid() { - return fmt.Errorf("object has no key %q", decodedToken) + return fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer) } - if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() 
!= reflect.Slice && mv.Kind() != reflect.Ptr { + if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Pointer { node = mv.Addr().Interface() continue } @@ -336,81 +265,155 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { } sLength := rValue.Len() if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer) } elem := rValue.Index(tokenIndex) - if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr { + if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Pointer { node = elem.Addr().Interface() continue } node = elem.Interface() default: - return fmt.Errorf("invalid token reference %q", decodedToken) + return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer) } - } return nil } -// DecodedTokens returns the decoded tokens -func (p *Pointer) DecodedTokens() []string { - result := make([]string, 0, len(p.referenceTokens)) - for _, t := range p.referenceTokens { - result = append(result, Unescape(t)) +func isNil(input any) bool { + if input == nil { + return true + } + + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Pointer, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false } - return result } -// IsEmpty returns true if this is an empty json pointer -// this indicates that it points to the root document -func (p *Pointer) IsEmpty() bool { - return len(p.referenceTokens) == 0 +// GetForToken gets a value for a json pointer token 1 level deep +func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) { + return getSingleImpl(document, decodedToken, jsonname.DefaultJSONNameProvider) } -// Pointer to string representation function -func (p *Pointer) String() string { +// SetForToken gets a value for a json pointer token 1 level deep +func SetForToken(document any, decodedToken string, value any) (any, error) { + return document, setSingleImpl(document, value, decodedToken, jsonname.DefaultJSONNameProvider) +} - if len(p.referenceTokens) == 0 { - return emptyPointer +func getSingleImpl(node any, decodedToken string, nameProvider *jsonname.NameProvider) (any, reflect.Kind, error) { + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + if isNil(node) { + return nil, kind, fmt.Errorf("nil value has no field %q: %w", decodedToken, ErrPointer) } - pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) + switch typed := node.(type) { + case JSONPointable: + r, err := typed.JSONLookup(decodedToken) + if err != nil { + return nil, kind, err + } + return r, kind, nil + case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect + return getSingleImpl(*typed, decodedToken, nameProvider) + } - return pointerString + switch kind { //nolint:exhaustive + case reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return nil, kind, fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) + } + fld := rValue.FieldByName(nm) + return fld.Interface(), kind, nil + + case reflect.Map: + 
kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if mv.IsValid() { + return mv.Interface(), kind, nil + } + return nil, kind, fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer) + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return nil, kind, err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength-1, tokenIndex, ErrPointer) + } + + elem := rValue.Index(tokenIndex) + return elem.Interface(), kind, nil + + default: + return nil, kind, fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer) + } } -func (p *Pointer) Offset(document string) (int64, error) { - dec := json.NewDecoder(strings.NewReader(document)) - var offset int64 - for _, ttk := range p.DecodedTokens() { - tk, err := dec.Token() +func setSingleImpl(node, data any, decodedToken string, nameProvider *jsonname.NameProvider) error { + rValue := reflect.Indirect(reflect.ValueOf(node)) + + // Check for nil to prevent panic when calling rValue.Type() + if isNil(node) { + return fmt.Errorf("cannot set field %q on nil value: %w", decodedToken, ErrPointer) + } + + if ns, ok := node.(JSONSetable); ok { // pointer impl + return ns.JSONSet(decodedToken, data) + } + + if rValue.Type().Implements(jsonSetableType) { + return node.(JSONSetable).JSONSet(decodedToken, data) + } + + switch rValue.Kind() { //nolint:exhaustive + case reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) + } + fld := rValue.FieldByName(nm) + if fld.IsValid() { + fld.Set(reflect.ValueOf(data)) + } + return nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + rValue.SetMapIndex(kv, reflect.ValueOf(data)) + return nil + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) if err != nil { - return 0, err + return err } - switch tk := tk.(type) { - case json.Delim: - switch tk { - case '{': - offset, err = offsetSingleObject(dec, ttk) - if err != nil { - return 0, err - } - case '[': - offset, err = offsetSingleArray(dec, ttk) - if err != nil { - return 0, err - } - default: - return 0, fmt.Errorf("invalid token %#v", tk) - } - default: - return 0, fmt.Errorf("invalid token %#v", tk) + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer) } + + elem := rValue.Index(tokenIndex) + if !elem.CanSet() { + return fmt.Errorf("can't set slice index %s to %v: %w", decodedToken, data, ErrPointer) + } + elem.Set(reflect.ValueOf(data)) + return nil + + default: + return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer) } - return offset, nil } func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { @@ -437,16 +440,16 @@ func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { return offset, nil } default: - return 0, fmt.Errorf("invalid token %#v", tk) + return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer) } } - return 0, fmt.Errorf("token reference %q not found", decodedToken) + return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer) } func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { idx, err := strconv.Atoi(decodedToken) if err != nil { - return 0, fmt.Errorf("token reference %q is 
not a number: %v", decodedToken, err) + return 0, fmt.Errorf("token reference %q is not a number: %v: %w", decodedToken, err, ErrPointer) } var i int for i = 0; i < idx && dec.More(); i++ { @@ -470,7 +473,7 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { } if !dec.More() { - return 0, fmt.Errorf("token reference %q not found", decodedToken) + return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer) } return dec.InputOffset(), nil } @@ -516,16 +519,17 @@ const ( decRefTok1 = `/` ) +var ( + encRefTokReplacer = strings.NewReplacer(encRefTok1, decRefTok1, encRefTok0, decRefTok0) + decRefTokReplacer = strings.NewReplacer(decRefTok1, encRefTok1, decRefTok0, encRefTok0) +) + // Unescape unescapes a json pointer reference token string to the original representation func Unescape(token string) string { - step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1) - step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0) - return step2 + return encRefTokReplacer.Replace(token) } // Escape escapes a pointer reference token string func Escape(token string) string { - step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0) - step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1) - return step2 + return decRefTokReplacer.Replace(token) } diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index 013fc1943a..7cea1af8b5 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -1,50 +1,75 @@ -linters-settings: - govet: - check-shadowing: true - gocyclo: - min-complexity: 30 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 - paralleltest: - ignore-missing: true +version: "2" linters: - enable-all: true + default: all disable: - - maligned - - lll - - gochecknoglobals - - godox - - gocognit - - whitespace - - wsl + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert - funlen - gochecknoglobals - gochecknoinits - - scopelint - - wrapcheck - - exhaustivestruct - - exhaustive + - gocognit + - godot + - godox + - gosmopolitan + - inamedparam + #- intrange # disabled while < go1.22 + - ireturn + - lll + - musttag + - nestif - nlreturn + - nonamedreturns + - noinlineerr + - paralleltest + - recvcheck - testpackage - - gci - - gofumpt - - goerr113 - - gomnd + - thelper - tparallel - - nestif - - godot - - errorlint - - varcheck - - interfacer - - deadcode - - golint - - ifshort - - structcheck - - nosnakecase + - unparam - varnamelen - - exhaustruct + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. 
+ # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md index b94753aa52..c7fc2049c1 100644 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -1,15 +1,19 @@ -# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonreference [![Build Status](https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonreference.svg)](https://pkg.go.dev/github.com/go-openapi/jsonreference) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonreference)](https://goreportcard.com/report/github.com/go-openapi/jsonreference) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) An implementation of JSON Reference - Go language ## Status Feature complete. Stable API ## Dependencies -https://github.com/go-openapi/jsonpointer +* https://github.com/go-openapi/jsonpointer ## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 +* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 +* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go index cfdef03e5d..6a1fed5dfe 100644 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -38,13 +38,25 @@ const ( fragmentRune = `#` ) +var ErrChildURL = errors.New("child url is nil") + +// Ref represents a json reference object +type Ref struct { + referenceURL *url.URL + referencePointer jsonpointer.Pointer + + HasFullURL bool + HasURLPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + // New creates a new reference for the given string func New(jsonReferenceString string) (Ref, error) { - var r Ref err := r.parse(jsonReferenceString) return r, err - } // MustCreateRef parses the ref string and panics when it's invalid. 
@@ -54,19 +66,8 @@ func MustCreateRef(ref string) Ref { if err != nil { panic(err) } - return r -} -// Ref represents a json reference object -type Ref struct { - referenceURL *url.URL - referencePointer jsonpointer.Pointer - - HasFullURL bool - HasURLPathOnly bool - HasFragmentOnly bool - HasFileScheme bool - HasFullFilePath bool + return r } // GetURL gets the URL for this reference @@ -81,7 +82,6 @@ func (r *Ref) GetPointer() *jsonpointer.Pointer { // String returns the best version of the url for this reference func (r *Ref) String() string { - if r.referenceURL != nil { return r.referenceURL.String() } @@ -106,9 +106,27 @@ func (r *Ref) IsCanonical() bool { return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) } +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + childURL := child.GetURL() + parentURL := r.GetURL() + if childURL == nil { + return nil, ErrChildURL + } + if parentURL == nil { + return &child, nil + } + + ref, err := New(parentURL.ResolveReference(childURL).String()) + if err != nil { + return nil, err + } + return &ref, nil +} + // "Constructor", parses the given string JSON reference func (r *Ref) parse(jsonReferenceString string) error { - parsed, err := url.Parse(jsonReferenceString) if err != nil { return err @@ -137,22 +155,3 @@ func (r *Ref) parse(jsonReferenceString string) error { return nil } - -// Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *Ref) Inherits(child Ref) (*Ref, error) { - childURL := child.GetURL() - parentURL := r.GetURL() - if childURL == nil { - return nil, errors.New("child url is nil") - } - if parentURL == nil { - return &child, nil - } - - ref, err := New(parentURL.ResolveReference(childURL).String()) - if err != nil { - return nil, err - } - return &ref, nil -} diff --git a/vendor/github.com/go-openapi/swag/.codecov.yml b/vendor/github.com/go-openapi/swag/.codecov.yml new file mode 100644 index 0000000000..3354f44b28 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.codecov.yml @@ -0,0 +1,4 @@ +ignore: + - jsonutils/fixtures_test + - jsonutils/adapters/ifaces/mocks + - jsonutils/adapters/testintegration/benchmarks diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 80e2be0042..4129e7e57e 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -1,60 +1,77 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 3 - min-occurrences: 3 - +version: "2" linters: - enable-all: true + default: all disable: - - maligned - - lll - - gochecknoinits - - gochecknoglobals + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert - funlen - - godox + - gochecknoglobals + - gochecknoinits - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - godot - - gofumpt + - godox + - gomoddirectives + - gosmopolitan + - inamedparam + - intrange + - ireturn + - lll + - musttag + - nestif + - nlreturn + - nonamedreturns + - noinlineerr - paralleltest - - tparallel + - recvcheck + - testpackage - thelper - - ifshort - - 
exhaustruct + - tagliatelle + - tparallel + - unparam - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/swag/.mockery.yml b/vendor/github.com/go-openapi/swag/.mockery.yml new file mode 100644 index 0000000000..8557cb58d3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.mockery.yml @@ -0,0 +1,30 @@ +all: false +dir: '{{.InterfaceDir}}' +filename: mocks_test.go +force-file-write: true +formatter: goimports +include-auto-generated: false +log-level: info +structname: '{{.Mock}}{{.InterfaceName}}' +pkgname: '{{.SrcPackageName}}' +recursive: false +require-template-schema-exists: true +template: matryer +template-schema: '{{.Template}}.schema.json' +packages: + github.com/go-openapi/swag/jsonutils/adapters/ifaces: + config: + dir: jsonutils/adapters/ifaces/mocks + filename: mocks.go + pkgname: 'mocks' + force-file-write: true + all: true + github.com/go-openapi/swag/jsonutils/adapters/testintegration: + config: + inpackage: true + dir: jsonutils/adapters/testintegration + force-file-write: true + all: true + interfaces: + EJMarshaler: + EJUnmarshaler: diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index a729222998..786b92fd3a 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,23 +1,192 @@ # Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) +[![license](https://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) [![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) -Contains a bunch of helper functions for go-openapi and go-swagger projects. +Package `swag` contains a bunch of helper functions for go-openapi and go-swagger projects. You may also use it standalone for your projects. 
-* convert between value and pointers for builtin types -* convert from string to builtin types (wraps strconv) -* fast json concatenation -* search in path -* load from file or http -* name mangling +> **NOTE** +> `swag` is one of the foundational building blocks of the go-openapi initiative. +> Most repositories in `github.com/go-openapi/...` depend on it in some way. +> And so does our CLI tool `github.com/go-swagger/go-swagger`, +> as well as the code generated by this tool. +* [Contents](#contents) +* [Dependencies](#dependencies) +* [Release Notes](#release-notes) +* [Note to contributors](#note-to-contributors) +* [TODOs, suggestions and plans](#todos-suggestions-and-plans) -This repo has only few dependencies outside of the standard library: +## Contents -* YAML utilities depend on `gopkg.in/yaml.v3` -* `github.com/mailru/easyjson v0.7.7` +`go-openapi/swag` exposes a collection of relatively independent modules. + +Moving forward, no additional feature will be added to the `swag` API directly at the root package level, +which remains there for backward-compatibility purposes. All exported top-level features are now deprecated. + +Child modules will continue to evolve and some new ones may be added in the future. + +| Module | Content | Main features | +|---------------|---------|---------------| +| `cmdutils` | utilities to work with CLIs || +| `conv` | type conversion utilities | convert between values and pointers for any types
convert from string to builtin types (wraps `strconv`)<br> require `./typeutils` (test dependency)<br>|
+| `fileutils` | file utilities | |
+| `jsonname` | JSON utilities | infer JSON names from `go` properties<br>|
+| `jsonutils` | JSON utilities | fast json concatenation<br> read and write JSON from and to dynamic `go` data structures<br> ~require `github.com/mailru/easyjson`~<br>|
+| `loading` | file loading | load from file or http<br> require `./yamlutils`<br>|
+| `mangling` | safe name generation | name mangling for `go`<br>|
+| `netutils` | networking utilities | host, port from address<br>|
+| `stringutils` | `string` utilities | search in slice (with case-insensitive)<br> split/join query parameters as arrays<br>|
+| `typeutils` | `go` types utilities | check the zero value for any type<br> safe check for a nil value<br>|
+| `yamlutils` | YAML utilities | converting YAML to JSON<br> loading YAML into a dynamic YAML document<br> maintaining the original order of keys in YAML objects<br> require `./jsonutils`<br> ~require `github.com/mailru/easyjson`~<br> require `go.yaml.in/yaml/v3`<br>|
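For orientation only (not part of the upstream patch): a minimal sketch of what consuming these sub-modules could look like. The function names `conv.IsFloat64AJSONInteger`, `jsonutils.ReadJSON` and `jsonutils.WriteJSON` are taken from this README; the exact signatures are assumptions based on the legacy root-level `swag` helpers.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag/conv"
	"github.com/go-openapi/swag/jsonutils"
)

func main() {
	// conv: the "float is integer" check called out in the v0.25.0 notes below.
	fmt.Println(conv.IsFloat64AJSONInteger(42.0)) // true

	// jsonutils: round-trip a dynamic document through the registered JSON adapter.
	// Signatures are assumed to mirror the legacy swag.WriteJSON/swag.ReadJSON helpers.
	doc := map[string]any{"name": "swag"}
	raw, err := jsonutils.WriteJSON(doc)
	if err != nil {
		panic(err)
	}
	var out map[string]any
	if err := jsonutils.ReadJSON(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(string(raw), out["name"])
}
```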
| + +--- + +## Dependencies + +The root module `github.com/go-openapi/swag` at the repo level maintains a few +dependencies outside of the standard library. + +* YAML utilities depend on `go.yaml.in/yaml/v3` +* JSON utilities depend on their registered adapter module: + * by default, only the standard library is used + * `github.com/mailru/easyjson` is now only a dependency for module + `github.com/go-openapi/swag/jsonutils/adapters/easyjson/json`, + for users willing to import that module. + * integration tests and benchmarks use all the dependencies are published as their own module +* other dependencies are test dependencies drawn from `github.com/stretchr/testify` + +## Release notes + +### v0.25.1 + +* fixes a data race that could occur when using the standard library implementation of a JSON ordered map + +### v0.25.0 + +**New with this release**: + +* requires `go1.24`, as iterators are being introduced +* removes the dependency to `mailru/easyjson` by default (#68) + * functionality remains the same, but performance may somewhat degrade for applications + that relied on `easyjson` + * users of the JSON or YAML utilities who want to use `easyjson` as their prefered JSON serializer library + will be able to do so by registering this the corresponding JSON adapter at runtime. See below. + * ordered keys in JSON and YAML objects: this feature used to rely solely on `easyjson`. + With this release, an implementation relying on the standard `encoding/json` is provided. + * an independent [benchmark](./jsonutils/adapters/testintegration/benchmarks/README.md) to compare the different adapters +* improves the "float is integer" check (`conv.IsFloat64AJSONInteger`) (#59) +* removes the _direct_ dependency to `gopkg.in/yaml.v3` (indirect dependency is still incurred through `stretchr/testify`) (#127) +* exposed `conv.IsNil()` (previously kept private): a safe nil check (accounting for the "non-nil interface with nil value" nonsensical go trick) + +**What coming next?** + +Moving forward, we want to : +* provide an implementation of the JSON adapter based on `encoding/json/v2`, for `go1.25` builds. +* provide similar implementations for `goccy/go-json` and `jsoniterator/go`, and perhaps some other + similar libraries may be interesting too. + + +**How to explicitly register a dependency at runtime**? + +The following would maintain how JSON utilities proposed by `swag` used work, up to `v0.24.1`. + + ```go + import "github.com/go-openapi/swag/jsonutils/adapters/easyjson/json" + + func init() { + json.Register() + } + ``` + +Subsequent calls to `jsonutils.ReadJSON()` or `jsonutils.WriteJSON()` will switch to `easyjson` +whenever the passed data structures implement the `easyjson.Unmarshaler` or `easyjson.Marshaler` respectively, +or fallback to the standard library. + +### v0.24.0 + +With this release, we have largely modernized the API of `swag`: + +* The traditional `swag` API is still supported: code that imports `swag` will still + compile and work the same. +* A deprecation notice is published to encourage consumers of this library to adopt + the newer API +* **Deprecation notice** + * configuration through global variables is now deprecated, in favor of options passed as parameters + * all helper functions are moved to more specialized packages, which are exposed as + go modules. Importing such a module would reduce the footprint of dependencies. 
+  * _all_ functions, variables, constants exposed by the deprecated API have now moved, so
+    that consumers of the new API no longer need to import github.com/go-openapi/swag, but
+    should import the desired sub-module(s).
+
+**New with this release**:
+
+* [x] type converters and pointer-to-value helpers now support generic types
+* [x] name mangling now supports pluralized initialisms (issue #46)
+  Strings like "contact IDs" are now recognized as such a plural form and mangled as a linter would expect.
+* [x] performance: small improvements to reduce the overhead of convert/format wrappers (see issues #110, or PR #108)
+* [x] performance: name mangling utilities run ~ 10% faster (PR #115)
+
+---
+
+## Note to contributors
+
+A mono-repo structure comes with some unavoidable extra pains...
+
+* Testing
+
+> The usual `go test ./...` command, run from the root of this repo, won't work any longer to test all submodules.
+>
+> Each module constitutes an independent unit of test. So you have to run `go test` inside each module.
+> Or you may take a look at how this is achieved by CI
+> [here](https://github.com/go-openapi/swag/blob/master/.github/workflows/go-test.yml).
+>
+> There are also some alternative tricks using `go work`, for local development, if you feel comfortable with
+> go workspaces. Perhaps some day, we'll have a `go work test` to run all tests without any hack.
+
+* Releasing
+
+> Each module follows its own independent module versioning.
+>
+> So you have tags like `mangling/v0.24.0`, `fileutils/v0.24.0` etc. that are used by `go mod` and `go get`
+> to refer to the tagged version of each module specifically.
+>
+> This means we may release patches etc. to each module independently.
+>
+> We'd like to adopt the rule that modules in this repo would only differ by a patch version
+> (e.g. `v0.24.5` vs `v0.24.3`), and we'll level all modules whenever a minor version is introduced.
+>
+> A script in `./hack` is provided to tag all modules with the same version in one go.
+
+* Continuous integration
+
+> At this moment, all tests in all modules are systematically run over the full test matrix (3 platforms x 2 go releases).
+> This generates quite a lot of jobs.
+>
+> We ought to reduce the number of jobs required to test a PR focused on only a few modules.
+
+## Todos, suggestions and plans
+
+All kinds of contributions are welcome.
+
+A few ideas:
+
+* [x] Complete the split of dependencies to isolate easyjson from the rest
+* [x] Improve CI to reduce needed tests
+* [x] Replace the dependency on `gopkg.in/yaml.v3` (`yamlutil`)
+* [ ] Improve mangling utilities (improve readability, support for capitalized words,
+  better word substitution for non-letter symbols...)
+* [ ] Move back to this common shared pot a few of the technical features introduced by go-swagger independently
+  (e.g. mangle go package names, search package with go modules support, ...)
+* [ ] Apply a similar mono-repo approach to go-openapi/strfmt, which suffers from similar woes: bloated API,
+  imposed dependency on some database driver.
+* [ ] Adapt `go-swagger` (incl. generated code) to the new `swag` API.
+* [ ] Factorize some tests, as there is a lot of redundant testing code in `jsonutils`
+* [ ] Benchmark & profiling: publish independently the tool built to analyze and chart benchmarks (e.g.
similar to `benchvisual`) +* [ ] more thorough testing for nil / null case +* [ ] ci pipeline to manage releases +* [ ] cleaner mockery generation (doesn't work out of the box for all sub-modules) diff --git a/vendor/github.com/go-openapi/swag/TODO.md b/vendor/github.com/go-openapi/swag/TODO.md new file mode 100644 index 0000000000..129888038b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/TODO.md @@ -0,0 +1 @@ +fix data race https://github.com/go-openapi/swag/actions/runs/17989156861/job/51174860188 diff --git a/vendor/github.com/go-openapi/swag/cmdutils/LICENSE b/vendor/github.com/go-openapi/swag/cmdutils/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/cmdutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/cmdutils/cmd_utils.go b/vendor/github.com/go-openapi/swag/cmdutils/cmd_utils.go new file mode 100644 index 0000000000..bc01ec2bb9 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/cmdutils/cmd_utils.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmdutils + +// CommandLineOptionsGroup represents a group of user-defined command line options. +// +// This is for instance used to configure command line arguments in API servers generated by go-swagger. +type CommandLineOptionsGroup struct { + ShortDescription string + LongDescription string + Options interface{} +} diff --git a/vendor/github.com/go-openapi/swag/cmdutils/doc.go b/vendor/github.com/go-openapi/swag/cmdutils/doc.go new file mode 100644 index 0000000000..63ac1d17ee --- /dev/null +++ b/vendor/github.com/go-openapi/swag/cmdutils/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package cmdutils brings helpers for CLIs produced by go-openapi +package cmdutils diff --git a/vendor/github.com/go-openapi/swag/cmdutils_iface.go b/vendor/github.com/go-openapi/swag/cmdutils_iface.go new file mode 100644 index 0000000000..1eaf36f157 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/cmdutils_iface.go @@ -0,0 +1,22 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "github.com/go-openapi/swag/cmdutils" + +// CommandLineOptionsGroup represents a group of user-defined command line options. +// +// Deprecated: use [cmdutils.CommandLineOptionsGroup] instead. +type CommandLineOptionsGroup = cmdutils.CommandLineOptionsGroup diff --git a/vendor/github.com/go-openapi/swag/conv/LICENSE b/vendor/github.com/go-openapi/swag/conv/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/conv/convert.go b/vendor/github.com/go-openapi/swag/conv/convert.go new file mode 100644 index 0000000000..b9b8698543 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/convert.go @@ -0,0 +1,172 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package conv + +import ( + "math" + "strconv" + "strings" +) + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 + epsilon float64 = 1e-9 +) + +// IsFloat64AJSONInteger allows for integers [-2^53, 2^53-1] inclusive. +func IsFloat64AJSONInteger(f float64) bool { + if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { + return false + } + rounded := math.Round(f) + if f == rounded { + return true + } + if rounded == 0 { // f = 0.0 exited above + return false + } + + diff := math.Abs(f - rounded) + if diff == 0 { + return true + } + + // relative error Abs{f - Round(f)) / Round(f)} < ε ; Round(f) + return diff < epsilon*math.Abs(rounded) +} + +// ConvertFloat turns a string into a float numerical value. +func ConvertFloat[T Float](str string) (T, error) { + var v T + f, err := strconv.ParseFloat(str, bitsize(v)) + if err != nil { + return 0, err + } + + return T(f), nil +} + +// ConvertInteger turns a string into a signed integer. +func ConvertInteger[T Signed](str string) (T, error) { + var v T + f, err := strconv.ParseInt(str, 10, bitsize(v)) + if err != nil { + return 0, err + } + + return T(f), nil +} + +// ConvertUinteger turns a string into an unsigned integer. +func ConvertUinteger[T Unsigned](str string) (T, error) { + var v T + f, err := strconv.ParseUint(str, 10, bitsize(v)) + if err != nil { + return 0, err + } + + return T(f), nil +} + +// ConvertBool turns a string into a boolean. +// +// It supports a few more "true" strings than [strconv.ParseBool]: +// +// - it is not case sensitive ("trUe" or "FalsE" work) +// - "ok", "yes", "y", "on", "selected", "checked", "enabled" are all true +// - everything that is not true is false: there is never an actual error returned +func ConvertBool(str string) (bool, error) { + switch strings.ToLower(str) { + case "true", + "1", + "yes", + "ok", + "y", + "on", + "selected", + "checked", + "t", + "enabled": + return true, nil + default: + return false, nil + } +} + +// ConvertFloat32 turns a string into a float32. 
+func ConvertFloat32(str string) (float32, error) { return ConvertFloat[float32](str) } + +// ConvertFloat64 turns a string into a float64 +func ConvertFloat64(str string) (float64, error) { return ConvertFloat[float64](str) } + +// ConvertInt8 turns a string into an int8 +func ConvertInt8(str string) (int8, error) { return ConvertInteger[int8](str) } + +// ConvertInt16 turns a string into an int16 +func ConvertInt16(str string) (int16, error) { + i, err := strconv.ParseInt(str, 10, 16) + if err != nil { + return 0, err + } + return int16(i), nil +} + +// ConvertInt32 turns a string into an int32 +func ConvertInt32(str string) (int32, error) { + i, err := strconv.ParseInt(str, 10, 32) + if err != nil { + return 0, err + } + return int32(i), nil +} + +// ConvertInt64 turns a string into an int64 +func ConvertInt64(str string) (int64, error) { + return strconv.ParseInt(str, 10, 64) +} + +// ConvertUint8 turns a string into an uint8 +func ConvertUint8(str string) (uint8, error) { + i, err := strconv.ParseUint(str, 10, 8) + if err != nil { + return 0, err + } + return uint8(i), nil +} + +// ConvertUint16 turns a string into an uint16 +func ConvertUint16(str string) (uint16, error) { + i, err := strconv.ParseUint(str, 10, 16) + if err != nil { + return 0, err + } + return uint16(i), nil +} + +// ConvertUint32 turns a string into an uint32 +func ConvertUint32(str string) (uint32, error) { + i, err := strconv.ParseUint(str, 10, 32) + if err != nil { + return 0, err + } + return uint32(i), nil +} + +// ConvertUint64 turns a string into an uint64 +func ConvertUint64(str string) (uint64, error) { + return strconv.ParseUint(str, 10, 64) +} diff --git a/vendor/github.com/go-openapi/swag/conv/convert_types.go b/vendor/github.com/go-openapi/swag/conv/convert_types.go new file mode 100644 index 0000000000..423e8663f8 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/convert_types.go @@ -0,0 +1,79 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package conv + +// The original version of this file, eons ago, was taken from the aws go sdk + +// Pointer returns a pointer to the value passed in. +func Pointer[T any](v T) *T { + return &v +} + +// Value returns a shallow copy of the value of the pointer passed in. +// +// If the pointer is nil, the returned value is the zero value. +func Value[T any](v *T) T { + if v != nil { + return *v + } + + var zero T + return zero +} + +// PointerSlice converts a slice of values into a slice of pointers. +func PointerSlice[T any](src []T) []*T { + dst := make([]*T, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// ValueSlice converts a slice of pointers into a slice of values. +// +// nil elements are zero values. +func ValueSlice[T any](src []*T) []T { + dst := make([]T, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// PointerMap converts a map of values into a map of pointers. 
+func PointerMap[K comparable, T any](src map[K]T) map[K]*T { + dst := make(map[K]*T) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// ValueMap converts a map of pointers into a map of values. +// +// nil elements are skipped. +func ValueMap[K comparable, T any](src map[K]*T) map[K]T { + dst := make(map[K]T) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/go-openapi/swag/conv/doc.go b/vendor/github.com/go-openapi/swag/conv/doc.go new file mode 100644 index 0000000000..b02711f422 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/doc.go @@ -0,0 +1,26 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package conv exposes utilities to convert types. +// +// The Convert and Format families of functions are essentially a shorthand to [strconv] functions, +// using the decimal representation of numbers. +// +// Features: +// +// - from string representation to value ("Convert*") and reciprocally ("Format*") +// - from pointer to value ([Value]) and reciprocally ([Pointer]) +// - from slice of values to slice of pointers ([PointerSlice]) and reciprocally ([ValueSlice]) +// - from map of values to map of pointers ([PointerMap]) and reciprocally ([ValueMap]) +package conv diff --git a/vendor/github.com/go-openapi/swag/conv/format.go b/vendor/github.com/go-openapi/swag/conv/format.go new file mode 100644 index 0000000000..db7562a4a3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/format.go @@ -0,0 +1,39 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package conv + +import ( + "strconv" +) + +// FormatInteger turns an integer type into a string. +func FormatInteger[T Signed](value T) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatUinteger turns an unsigned integer type into a string. +func FormatUinteger[T Unsigned](value T) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatFloat turns a floating point numerical value into a string. +func FormatFloat[T Float](value T) string { + return strconv.FormatFloat(float64(value), 'f', -1, bitsize(value)) +} + +// FormatBool turns a boolean into a string. 
+func FormatBool(value bool) string { + return strconv.FormatBool(value) +} diff --git a/vendor/github.com/go-openapi/swag/conv/sizeof.go b/vendor/github.com/go-openapi/swag/conv/sizeof.go new file mode 100644 index 0000000000..646f8be9aa --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/sizeof.go @@ -0,0 +1,17 @@ +package conv + +import "unsafe" + +// bitsize returns the size in bits of a type. +// +// NOTE: [unsafe.SizeOf] simply returns the size in bytes of the value. +// For primitive types T, the generic stencil is precompiled and this value +// is resolved at compile time, resulting in an immediate call to [strconv.ParseFloat]. +// +// We may leave up to the go compiler to simplify this function into a +// constant value, which happens in practice at least for primitive types +// (e.g. numerical types). +func bitsize[T Numerical](value T) int { + const bitsPerByte = 8 + return int(unsafe.Sizeof(value)) * bitsPerByte +} diff --git a/vendor/github.com/go-openapi/swag/conv/type_constraints.go b/vendor/github.com/go-openapi/swag/conv/type_constraints.go new file mode 100644 index 0000000000..3c61498361 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/type_constraints.go @@ -0,0 +1,40 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package conv + +type ( + // these type constraints are redefined after golang.org/x/exp/constraints, + // because importing that package causes an undesired go upgrade. + + // Signed integer types, cf. [golang.org/x/exp/constraints.Signed] + Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 + } + + // Unsigned integer types, cf. [golang.org/x/exp/constraints.Unsigned] + Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr + } + + // Float numerical types, cf. [golang.org/x/exp/constraints.Float] + Float interface { + ~float32 | ~float64 + } + + // Numerical types + Numerical interface { + Signed | Unsigned | Float + } +) diff --git a/vendor/github.com/go-openapi/swag/conv_iface.go b/vendor/github.com/go-openapi/swag/conv_iface.go new file mode 100644 index 0000000000..9991acb651 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv_iface.go @@ -0,0 +1,497 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "time" + + "github.com/go-openapi/swag/conv" +) + +// IsFloat64AJSONInteger allows for integers [-2^53, 2^53-1] inclusive. 
+// +// Deprecated: use [conv.IsFloat64AJSONInteger] instead. +func IsFloat64AJSONInteger(f float64) bool { return conv.IsFloat64AJSONInteger(f) } + +// ConvertBool turns a string into a boolean. +// +// Deprecated: use [conv.ConvertBool] instead. +func ConvertBool(str string) (bool, error) { return conv.ConvertBool(str) } + +// ConvertFloat32 turns a string into a float32. +// +// Deprecated: use [conv.ConvertFloat32] instead. Alternatively, you may use the generic version [conv.ConvertFloat]. +func ConvertFloat32(str string) (float32, error) { return conv.ConvertFloat[float32](str) } + +// ConvertFloat64 turns a string into a float64. +// +// Deprecated: use [conv.ConvertFloat64] instead. Alternatively, you may use the generic version [conv.ConvertFloat]. +func ConvertFloat64(str string) (float64, error) { return conv.ConvertFloat[float64](str) } + +// ConvertInt8 turns a string into an int8. +// +// Deprecated: use [conv.ConvertInt8] instead. Alternatively, you may use the generic version [conv.ConvertInteger]. +func ConvertInt8(str string) (int8, error) { return conv.ConvertInteger[int8](str) } + +// ConvertInt16 turns a string into an int16. +// +// Deprecated: use [conv.ConvertInt16] instead. Alternatively, you may use the generic version [conv.ConvertInteger]. +func ConvertInt16(str string) (int16, error) { return conv.ConvertInteger[int16](str) } + +// ConvertInt32 turns a string into an int32. +// +// Deprecated: use [conv.ConvertInt32] instead. Alternatively, you may use the generic version [conv.ConvertInteger]. +func ConvertInt32(str string) (int32, error) { return conv.ConvertInteger[int32](str) } + +// ConvertInt64 turns a string into an int64. +// +// Deprecated: use [conv.ConvertInt64] instead. Alternatively, you may use the generic version [conv.ConvertInteger]. +func ConvertInt64(str string) (int64, error) { return conv.ConvertInteger[int64](str) } + +// ConvertUint8 turns a string into an uint8. +// +// Deprecated: use [conv.ConvertUint8] instead. Alternatively, you may use the generic version [conv.ConvertUinteger]. +func ConvertUint8(str string) (uint8, error) { return conv.ConvertUinteger[uint8](str) } + +// ConvertUint16 turns a string into an uint16. +// +// Deprecated: use [conv.ConvertUint16] instead. Alternatively, you may use the generic version [conv.ConvertUinteger]. +func ConvertUint16(str string) (uint16, error) { return conv.ConvertUinteger[uint16](str) } + +// ConvertUint32 turns a string into an uint32. +// +// Deprecated: use [conv.ConvertUint32] instead. Alternatively, you may use the generic version [conv.ConvertUinteger]. +func ConvertUint32(str string) (uint32, error) { return conv.ConvertUinteger[uint32](str) } + +// ConvertUint64 turns a string into an uint64. +// +// Deprecated: use [conv.ConvertUint64] instead. Alternatively, you may use the generic version [conv.ConvertUinteger]. +func ConvertUint64(str string) (uint64, error) { return conv.ConvertUinteger[uint64](str) } + +// FormatBool turns a boolean into a string. +// +// Deprecated: use [conv.FormatBool] instead. +func FormatBool(value bool) string { return conv.FormatBool(value) } + +// FormatFloat32 turns a float32 into a string. +// +// Deprecated: use [conv.FormatFloat] instead. +func FormatFloat32(value float32) string { return conv.FormatFloat(value) } + +// FormatFloat64 turns a float64 into a string. +// +// Deprecated: use [conv.FormatFloat] instead. +func FormatFloat64(value float64) string { return conv.FormatFloat(value) } + +// FormatInt8 turns an int8 into a string. 
+// +// Deprecated: use [conv.FormatInteger] instead. +func FormatInt8(value int8) string { return conv.FormatInteger(value) } + +// FormatInt16 turns an int16 into a string. +// +// Deprecated: use [conv.FormatInteger] instead. +func FormatInt16(value int16) string { return conv.FormatInteger(value) } + +// FormatInt32 turns an int32 into a string +// +// Deprecated: use [conv.FormatInteger] instead. +func FormatInt32(value int32) string { return conv.FormatInteger(value) } + +// FormatInt64 turns an int64 into a string. +// +// Deprecated: use [conv.FormatInteger] instead. +func FormatInt64(value int64) string { return conv.FormatInteger(value) } + +// FormatUint8 turns an uint8 into a string. +// +// Deprecated: use [conv.FormatUinteger] instead. +func FormatUint8(value uint8) string { return conv.FormatUinteger(value) } + +// FormatUint16 turns an uint16 into a string. +// +// Deprecated: use [conv.FormatUinteger] instead. +func FormatUint16(value uint16) string { return conv.FormatUinteger(value) } + +// FormatUint32 turns an uint32 into a string. +// +// Deprecated: use [conv.FormatUinteger] instead. +func FormatUint32(value uint32) string { return conv.FormatUinteger(value) } + +// FormatUint64 turns an uint64 into a string. +// +// Deprecated: use [conv.FormatUinteger] instead. +func FormatUint64(value uint64) string { return conv.FormatUinteger(value) } + +// String turn a pointer to of the string value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func String(v string) *string { return conv.Pointer(v) } + +// StringValue turn the value of the string pointer passed in or +// "" if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func StringValue(v *string) string { return conv.Value(v) } + +// StringSlice converts a slice of string values into a slice of string pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func StringSlice(src []string) []*string { return conv.PointerSlice(src) } + +// StringValueSlice converts a slice of string pointers into a slice of string values. +// +// Deprecated: use [conv.ValueSlice] instead. +func StringValueSlice(src []*string) []string { return conv.ValueSlice(src) } + +// StringMap converts a string map of string values into a string map of string pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func StringMap(src map[string]string) map[string]*string { return conv.PointerMap(src) } + +// StringValueMap converts a string map of string pointers into a string map of string values. +// +// Deprecated: use [conv.ValueMap] instead. +func StringValueMap(src map[string]*string) map[string]string { return conv.ValueMap(src) } + +// Bool turn a pointer to of the bool value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Bool(v bool) *bool { return conv.Pointer(v) } + +// BoolValue turn the value of the bool pointer passed in or false if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func BoolValue(v *bool) bool { return conv.Value(v) } + +// BoolSlice converts a slice of bool values into a slice of bool pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func BoolSlice(src []bool) []*bool { return conv.PointerSlice(src) } + +// BoolValueSlice converts a slice of bool pointers into a slice of bool values. +// +// Deprecated: use [conv.ValueSlice] instead. +func BoolValueSlice(src []*bool) []bool { return conv.ValueSlice(src) } + +// BoolMap converts a string map of bool values into a string map of bool pointers. 
+// +// Deprecated: use [conv.PointerMap] instead. +func BoolMap(src map[string]bool) map[string]*bool { return conv.PointerMap(src) } + +// BoolValueMap converts a string map of bool pointers into a string map of bool values. +// +// Deprecated: use [conv.ValueMap] instead. +func BoolValueMap(src map[string]*bool) map[string]bool { return conv.ValueMap(src) } + +// Int turn a pointer to of the int value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Int(v int) *int { return conv.Pointer(v) } + +// IntValue turn the value of the int pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func IntValue(v *int) int { return conv.Value(v) } + +// IntSlice converts a slice of int values into a slice of int pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func IntSlice(src []int) []*int { return conv.PointerSlice(src) } + +// IntValueSlice converts a slice of int pointers into a slice of int values. +// +// Deprecated: use [conv.ValueSlice] instead. +func IntValueSlice(src []*int) []int { return conv.ValueSlice(src) } + +// IntMap converts a string map of int values into a string map of int pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func IntMap(src map[string]int) map[string]*int { return conv.PointerMap(src) } + +// IntValueMap converts a string map of int pointers into a string map of int values. +// +// Deprecated: use [conv.ValueMap] instead. +func IntValueMap(src map[string]*int) map[string]int { return conv.ValueMap(src) } + +// Int32 turn a pointer to of the int32 value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Int32(v int32) *int32 { return conv.Pointer(v) } + +// Int32Value turn the value of the int32 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Int32Value(v *int32) int32 { return conv.Value(v) } + +// Int32Slice converts a slice of int32 values into a slice of int32 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func Int32Slice(src []int32) []*int32 { return conv.PointerSlice(src) } + +// Int32ValueSlice converts a slice of int32 pointers into a slice of int32 values. +// +// Deprecated: use [conv.ValueSlice] instead. +func Int32ValueSlice(src []*int32) []int32 { return conv.ValueSlice(src) } + +// Int32Map converts a string map of int32 values into a string map of int32 pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func Int32Map(src map[string]int32) map[string]*int32 { return conv.PointerMap(src) } + +// Int32ValueMap converts a string map of int32 pointers into a string map of int32 values. +// +// Deprecated: use [conv.ValueMap] instead. +func Int32ValueMap(src map[string]*int32) map[string]int32 { return conv.ValueMap(src) } + +// Int64 turn a pointer to of the int64 value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Int64(v int64) *int64 { return conv.Pointer(v) } + +// Int64Value turn the value of the int64 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Int64Value(v *int64) int64 { return conv.Value(v) } + +// Int64Slice converts a slice of int64 values into a slice of int64 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func Int64Slice(src []int64) []*int64 { return conv.PointerSlice(src) } + +// Int64ValueSlice converts a slice of int64 pointers into a slice of int64 values. +// +// Deprecated: use [conv.ValueSlice] instead. 
+func Int64ValueSlice(src []*int64) []int64 { return conv.ValueSlice(src) } + +// Int64Map converts a string map of int64 values into a string map of int64 pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func Int64Map(src map[string]int64) map[string]*int64 { return conv.PointerMap(src) } + +// Int64ValueMap converts a string map of int64 pointers into a string map of int64 values. +// +// Deprecated: use [conv.ValueMap] instead. +func Int64ValueMap(src map[string]*int64) map[string]int64 { return conv.ValueMap(src) } + +// Uint16 turn a pointer to of the uint16 value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Uint16(v uint16) *uint16 { return conv.Pointer(v) } + +// Uint16Value turn the value of the uint16 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Uint16Value(v *uint16) uint16 { return conv.Value(v) } + +// Uint16Slice converts a slice of uint16 values into a slice of uint16 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func Uint16Slice(src []uint16) []*uint16 { return conv.PointerSlice(src) } + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of uint16 values. +// +// Deprecated: use [conv.ValueSlice] instead. +func Uint16ValueSlice(src []*uint16) []uint16 { return conv.ValueSlice(src) } + +// Uint16Map converts a string map of uint16 values into a string map of uint16 pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func Uint16Map(src map[string]uint16) map[string]*uint16 { return conv.PointerMap(src) } + +// Uint16ValueMap converts a string map of uint16 pointers into a string map of uint16 values. +// +// Deprecated: use [conv.ValueMap] instead. +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { return conv.ValueMap(src) } + +// Uint turn a pointer to of the uint value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Uint(v uint) *uint { return conv.Pointer(v) } + +// UintValue turn the value of the uint pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func UintValue(v *uint) uint { return conv.Value(v) } + +// UintSlice converts a slice of uint values into a slice of uint pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func UintSlice(src []uint) []*uint { return conv.PointerSlice(src) } + +// UintValueSlice converts a slice of uint pointers into a slice of uint values. +// +// Deprecated: use [conv.ValueSlice] instead. +func UintValueSlice(src []*uint) []uint { return conv.ValueSlice(src) } + +// UintMap converts a string map of uint values into a string map of uint pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func UintMap(src map[string]uint) map[string]*uint { return conv.PointerMap(src) } + +// UintValueMap converts a string map of uint pointers into a string map of uint values. +// +// Deprecated: use [conv.ValueMap] instead. +func UintValueMap(src map[string]*uint) map[string]uint { return conv.ValueMap(src) } + +// Uint32 turn a pointer to of the uint32 value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Uint32(v uint32) *uint32 { return conv.Pointer(v) } + +// Uint32Value turn the value of the uint32 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Uint32Value(v *uint32) uint32 { return conv.Value(v) } + +// Uint32Slice converts a slice of uint32 values into a slice of uint32 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. 
+func Uint32Slice(src []uint32) []*uint32 { return conv.PointerSlice(src) } + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of uint32 values. +// +// Deprecated: use [conv.ValueSlice] instead. +func Uint32ValueSlice(src []*uint32) []uint32 { return conv.ValueSlice(src) } + +// Uint32Map converts a string map of uint32 values into a string map of uint32 pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func Uint32Map(src map[string]uint32) map[string]*uint32 { return conv.PointerMap(src) } + +// Uint32ValueMap converts a string map of uint32 pointers into a string map of uint32 values. +// +// Deprecated: use [conv.ValueMap] instead. +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { return conv.ValueMap(src) } + +// Uint64 turn a pointer to of the uint64 value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Uint64(v uint64) *uint64 { return conv.Pointer(v) } + +// Uint64Value turn the value of the uint64 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Uint64Value(v *uint64) uint64 { return conv.Value(v) } + +// Uint64Slice converts a slice of uint64 values into a slice of uint64 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func Uint64Slice(src []uint64) []*uint64 { return conv.PointerSlice(src) } + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of uint64 values. +// +// Deprecated: use [conv.ValueSlice] instead. +func Uint64ValueSlice(src []*uint64) []uint64 { return conv.ValueSlice(src) } + +// Uint64Map converts a string map of uint64 values into a string map of uint64 pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func Uint64Map(src map[string]uint64) map[string]*uint64 { return conv.PointerMap(src) } + +// Uint64ValueMap converts a string map of uint64 pointers into a string map of uint64 values. +// +// Deprecated: use [conv.ValueMap] instead. +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { return conv.ValueMap(src) } + +// Float32 turn a pointer to of the float32 value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Float32(v float32) *float32 { return conv.Pointer(v) } + +// Float32Value turn the value of the float32 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Float32Value(v *float32) float32 { return conv.Value(v) } + +// Float32Slice converts a slice of float32 values into a slice of float32 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func Float32Slice(src []float32) []*float32 { return conv.PointerSlice(src) } + +// Float32ValueSlice converts a slice of float32 pointers into a slice of float32 values. +// +// Deprecated: use [conv.ValueSlice] instead. +func Float32ValueSlice(src []*float32) []float32 { return conv.ValueSlice(src) } + +// Float32Map converts a string map of float32 values into a string map of float32 pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func Float32Map(src map[string]float32) map[string]*float32 { return conv.PointerMap(src) } + +// Float32ValueMap converts a string map of float32 pointers into a string map of float32 values. +// +// Deprecated: use [conv.ValueMap] instead. +func Float32ValueMap(src map[string]*float32) map[string]float32 { return conv.ValueMap(src) } + +// Float64 turn a pointer to of the float64 value passed in. +// +// Deprecated: use [conv.Pointer] instead. 
+func Float64(v float64) *float64 { return conv.Pointer(v) } + +// Float64Value turn the value of the float64 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Float64Value(v *float64) float64 { return conv.Value(v) } + +// Float64Slice converts a slice of float64 values into a slice of float64 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func Float64Slice(src []float64) []*float64 { return conv.PointerSlice(src) } + +// Float64ValueSlice converts a slice of float64 pointers into a slice of float64 values. +// +// Deprecated: use [conv.ValueSlice] instead. +func Float64ValueSlice(src []*float64) []float64 { return conv.ValueSlice(src) } + +// Float64Map converts a string map of float64 values into a string map of float64 pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func Float64Map(src map[string]float64) map[string]*float64 { return conv.PointerMap(src) } + +// Float64ValueMap converts a string map of float64 pointers into a string map of float64 values. +// +// Deprecated: use [conv.ValueMap] instead. +func Float64ValueMap(src map[string]*float64) map[string]float64 { return conv.ValueMap(src) } + +// Time turn a pointer to of the time.Time value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Time(v time.Time) *time.Time { return conv.Pointer(v) } + +// TimeValue turn the value of the time.Time pointer passed in or time.Time{} if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func TimeValue(v *time.Time) time.Time { return conv.Value(v) } + +// TimeSlice converts a slice of time.Time values into a slice of time.Time pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func TimeSlice(src []time.Time) []*time.Time { return conv.PointerSlice(src) } + +// TimeValueSlice converts a slice of time.Time pointers into a slice of time.Time values +// +// Deprecated: use [conv.ValueSlice] instead. +func TimeValueSlice(src []*time.Time) []time.Time { return conv.ValueSlice(src) } + +// TimeMap converts a string map of time.Time values into a string map of time.Time pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func TimeMap(src map[string]time.Time) map[string]*time.Time { return conv.PointerMap(src) } + +// TimeValueMap converts a string map of time.Time pointers into a string map of time.Time values. +// +// Deprecated: use [conv.ValueMap] instead. +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { return conv.ValueMap(src) } diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go deleted file mode 100644 index fc085aeb8e..0000000000 --- a/vendor/github.com/go-openapi/swag/convert.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package swag - -import ( - "math" - "strconv" - "strings" -) - -// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER -const ( - maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 - minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 - epsilon float64 = 1e-9 -) - -// IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive -func IsFloat64AJSONInteger(f float64) bool { - if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { - return false - } - fa := math.Abs(f) - g := float64(uint64(f)) - ga := math.Abs(g) - - diff := math.Abs(f - g) - - // more info: https://floating-point-gui.de/errors/comparison/#look-out-for-edge-cases - switch { - case f == g: // best case - return true - case f == float64(int64(f)) || f == float64(uint64(f)): // optimistic case - return true - case f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64: // very close to 0 values - return diff < (epsilon * math.SmallestNonzeroFloat64) - } - // check the relative error - return diff/math.Min(fa+ga, math.MaxFloat64) < epsilon -} - -var evaluatesAsTrue map[string]struct{} - -func init() { - evaluatesAsTrue = map[string]struct{}{ - "true": {}, - "1": {}, - "yes": {}, - "ok": {}, - "y": {}, - "on": {}, - "selected": {}, - "checked": {}, - "t": {}, - "enabled": {}, - } -} - -// ConvertBool turn a string into a boolean -func ConvertBool(str string) (bool, error) { - _, ok := evaluatesAsTrue[strings.ToLower(str)] - return ok, nil -} - -// ConvertFloat32 turn a string into a float32 -func ConvertFloat32(str string) (float32, error) { - f, err := strconv.ParseFloat(str, 32) - if err != nil { - return 0, err - } - return float32(f), nil -} - -// ConvertFloat64 turn a string into a float64 -func ConvertFloat64(str string) (float64, error) { - return strconv.ParseFloat(str, 64) -} - -// ConvertInt8 turn a string into an int8 -func ConvertInt8(str string) (int8, error) { - i, err := strconv.ParseInt(str, 10, 8) - if err != nil { - return 0, err - } - return int8(i), nil -} - -// ConvertInt16 turn a string into an int16 -func ConvertInt16(str string) (int16, error) { - i, err := strconv.ParseInt(str, 10, 16) - if err != nil { - return 0, err - } - return int16(i), nil -} - -// ConvertInt32 turn a string into an int32 -func ConvertInt32(str string) (int32, error) { - i, err := strconv.ParseInt(str, 10, 32) - if err != nil { - return 0, err - } - return int32(i), nil -} - -// ConvertInt64 turn a string into an int64 -func ConvertInt64(str string) (int64, error) { - return strconv.ParseInt(str, 10, 64) -} - -// ConvertUint8 turn a string into an uint8 -func ConvertUint8(str string) (uint8, error) { - i, err := strconv.ParseUint(str, 10, 8) - if err != nil { - return 0, err - } - return uint8(i), nil -} - -// ConvertUint16 turn a string into an uint16 -func ConvertUint16(str string) (uint16, error) { - i, err := strconv.ParseUint(str, 10, 16) - if err != nil { - return 0, err - } - return uint16(i), nil -} - -// ConvertUint32 turn a string into an uint32 -func ConvertUint32(str string) (uint32, error) { - i, err := strconv.ParseUint(str, 10, 32) - if err != nil { - return 0, err - } - return uint32(i), nil -} - -// ConvertUint64 turn a string into an uint64 -func ConvertUint64(str string) (uint64, error) { - return strconv.ParseUint(str, 10, 64) -} - -// FormatBool turns a boolean into a string -func FormatBool(value bool) string { - return strconv.FormatBool(value) -} - -// FormatFloat32 turns a float32 into a string -func FormatFloat32(value float32) 
string { - return strconv.FormatFloat(float64(value), 'f', -1, 32) -} - -// FormatFloat64 turns a float64 into a string -func FormatFloat64(value float64) string { - return strconv.FormatFloat(value, 'f', -1, 64) -} - -// FormatInt8 turns an int8 into a string -func FormatInt8(value int8) string { - return strconv.FormatInt(int64(value), 10) -} - -// FormatInt16 turns an int16 into a string -func FormatInt16(value int16) string { - return strconv.FormatInt(int64(value), 10) -} - -// FormatInt32 turns an int32 into a string -func FormatInt32(value int32) string { - return strconv.Itoa(int(value)) -} - -// FormatInt64 turns an int64 into a string -func FormatInt64(value int64) string { - return strconv.FormatInt(value, 10) -} - -// FormatUint8 turns an uint8 into a string -func FormatUint8(value uint8) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint16 turns an uint16 into a string -func FormatUint16(value uint16) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint32 turns an uint32 into a string -func FormatUint32(value uint32) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint64 turns an uint64 into a string -func FormatUint64(value uint64) string { - return strconv.FormatUint(value, 10) -} diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go deleted file mode 100644 index c49cc473a8..0000000000 --- a/vendor/github.com/go-openapi/swag/convert_types.go +++ /dev/null @@ -1,730 +0,0 @@ -package swag - -import "time" - -// This file was taken from the aws go sdk - -// String returns a pointer to of the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. -func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to of the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. 
-func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to of the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. -func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int32 returns a pointer to of the int32 value passed in. -func Int32(v int32) *int32 { - return &v -} - -// Int32Value returns the value of the int32 pointer passed in or -// 0 if the pointer is nil. 
-func Int32Value(v *int32) int32 { - if v != nil { - return *v - } - return 0 -} - -// Int32Slice converts a slice of int32 values into a slice of -// int32 pointers -func Int32Slice(src []int32) []*int32 { - dst := make([]*int32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int32ValueSlice converts a slice of int32 pointers into a slice of -// int32 values -func Int32ValueSlice(src []*int32) []int32 { - dst := make([]int32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int32Map converts a string map of int32 values into a string -// map of int32 pointers -func Int32Map(src map[string]int32) map[string]*int32 { - dst := make(map[string]*int32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int32ValueMap converts a string map of int32 pointers into a string -// map of int32 values -func Int32ValueMap(src map[string]*int32) map[string]int32 { - dst := make(map[string]int32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to of the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. -func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint16 returns a pointer to of the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return &v -} - -// Uint16Value returns the value of the uint16 pointer passed in or -// 0 if the pointer is nil. 
-func Uint16Value(v *uint16) uint16 { - if v != nil { - return *v - } - - return 0 -} - -// Uint16Slice converts a slice of uint16 values into a slice of -// uint16 pointers -func Uint16Slice(src []uint16) []*uint16 { - dst := make([]*uint16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - - return dst -} - -// Uint16ValueSlice converts a slice of uint16 pointers into a slice of -// uint16 values -func Uint16ValueSlice(src []*uint16) []uint16 { - dst := make([]uint16, len(src)) - - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - - return dst -} - -// Uint16Map converts a string map of uint16 values into a string -// map of uint16 pointers -func Uint16Map(src map[string]uint16) map[string]*uint16 { - dst := make(map[string]*uint16) - - for k, val := range src { - v := val - dst[k] = &v - } - - return dst -} - -// Uint16ValueMap converts a string map of uint16 pointers into a string -// map of uint16 values -func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { - dst := make(map[string]uint16) - - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - - return dst -} - -// Uint returns a pointer to of the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintValue returns the value of the uint pointer passed in or -// 0 if the pointer is nil. -func UintValue(v *uint) uint { - if v != nil { - return *v - } - return 0 -} - -// UintSlice converts a slice of uint values into a slice of -// uint pointers -func UintSlice(src []uint) []*uint { - dst := make([]*uint, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// UintValueSlice converts a slice of uint pointers into a slice of -// uint values -func UintValueSlice(src []*uint) []uint { - dst := make([]uint, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// UintMap converts a string map of uint values into a string -// map of uint pointers -func UintMap(src map[string]uint) map[string]*uint { - dst := make(map[string]*uint) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// UintValueMap converts a string map of uint pointers into a string -// map of uint values -func UintValueMap(src map[string]*uint) map[string]uint { - dst := make(map[string]uint) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint32 returns a pointer to of the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Value returns the value of the uint32 pointer passed in or -// 0 if the pointer is nil. 
-func Uint32Value(v *uint32) uint32 { - if v != nil { - return *v - } - return 0 -} - -// Uint32Slice converts a slice of uint32 values into a slice of -// uint32 pointers -func Uint32Slice(src []uint32) []*uint32 { - dst := make([]*uint32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint32ValueSlice converts a slice of uint32 pointers into a slice of -// uint32 values -func Uint32ValueSlice(src []*uint32) []uint32 { - dst := make([]uint32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint32Map converts a string map of uint32 values into a string -// map of uint32 pointers -func Uint32Map(src map[string]uint32) map[string]*uint32 { - dst := make(map[string]*uint32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint32ValueMap converts a string map of uint32 pointers into a string -// map of uint32 values -func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { - dst := make(map[string]uint32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint64 returns a pointer to of the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Value returns the value of the uint64 pointer passed in or -// 0 if the pointer is nil. -func Uint64Value(v *uint64) uint64 { - if v != nil { - return *v - } - return 0 -} - -// Uint64Slice converts a slice of uint64 values into a slice of -// uint64 pointers -func Uint64Slice(src []uint64) []*uint64 { - dst := make([]*uint64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint64ValueSlice converts a slice of uint64 pointers into a slice of -// uint64 values -func Uint64ValueSlice(src []*uint64) []uint64 { - dst := make([]uint64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint64Map converts a string map of uint64 values into a string -// map of uint64 pointers -func Uint64Map(src map[string]uint64) map[string]*uint64 { - dst := make(map[string]*uint64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint64ValueMap converts a string map of uint64 pointers into a string -// map of uint64 values -func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { - dst := make(map[string]uint64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float32 returns a pointer to of the float32 value passed in. -func Float32(v float32) *float32 { - return &v -} - -// Float32Value returns the value of the float32 pointer passed in or -// 0 if the pointer is nil. 
-func Float32Value(v *float32) float32 { - if v != nil { - return *v - } - - return 0 -} - -// Float32Slice converts a slice of float32 values into a slice of -// float32 pointers -func Float32Slice(src []float32) []*float32 { - dst := make([]*float32, len(src)) - - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - - return dst -} - -// Float32ValueSlice converts a slice of float32 pointers into a slice of -// float32 values -func Float32ValueSlice(src []*float32) []float32 { - dst := make([]float32, len(src)) - - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - - return dst -} - -// Float32Map converts a string map of float32 values into a string -// map of float32 pointers -func Float32Map(src map[string]float32) map[string]*float32 { - dst := make(map[string]*float32) - - for k, val := range src { - v := val - dst[k] = &v - } - - return dst -} - -// Float32ValueMap converts a string map of float32 pointers into a string -// map of float32 values -func Float32ValueMap(src map[string]*float32) map[string]float32 { - dst := make(map[string]float32) - - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - - return dst -} - -// Float64 returns a pointer to of the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. -func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to of the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. 
-func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go index 55094cb74c..a079fe8106 100644 --- a/vendor/github.com/go-openapi/swag/doc.go +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -12,20 +12,47 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package swag contains a bunch of helper functions for go-openapi and go-swagger projects. - -You may also use it standalone for your projects. - - - convert between value and pointers for builtin types - - convert from string to builtin types (wraps strconv) - - fast json concatenation - - search in path - - load from file or http - - name mangling - -This repo has only few dependencies outside of the standard library: - - - YAML utilities depend on gopkg.in/yaml.v2 -*/ +// Package swag contains a bunch of helper functions for go-openapi and go-swagger projects. +// +// You may also use it standalone for your projects. +// +// NOTE: all features that used to be exposed as package-level members (constants, variables, +// functions and types) are now deprecated and are superseded by equivalent features in +// more specialized sub-packages. +// Moving forward, no additional feature will be added to the [swag] API directly at the root package level, +// which remains there for backward-compatibility purposes. +// +// Child modules will continue to evolve or some new ones may be added in the future. 
+// +// # Modules +// +// - [cmdutils] utilities to work with CLIs +// +// - [conv] type conversion utilities +// +// - [fileutils] file utilities +// +// - [jsonname] JSON utilities +// +// - [jsonutils] JSON utilities +// +// - [loading] file loading +// +// - [mangling] safe name generation +// +// - [netutils] networking utilities +// +// - [stringutils] `string` utilities +// +// - [typeutils] `go` types utilities +// +// - [yamlutils] YAML utilities +// +// # Dependencies +// +// This repo has a few dependencies outside of the standard library: +// +// - YAML utilities depend on [go.yaml.in/yaml/v3] package swag + +//go:generate mockery diff --git a/vendor/github.com/go-openapi/swag/fileutils/LICENSE b/vendor/github.com/go-openapi/swag/fileutils/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/fileutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/fileutils/doc.go b/vendor/github.com/go-openapi/swag/fileutils/doc.go new file mode 100644 index 0000000000..4b48e71960 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/fileutils/doc.go @@ -0,0 +1,21 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fileutils exposes utilities to deal with files and paths. +// +// Currently, there is: +// - [File] to represent an abstraction of an uploaded file. +// For instance, this is used by [github.com/go-openapi/runtime.File]. +// - path search utilities (e.g. finding packages in the GO search path) +package fileutils diff --git a/vendor/github.com/go-openapi/swag/file.go b/vendor/github.com/go-openapi/swag/fileutils/file.go similarity index 98% rename from vendor/github.com/go-openapi/swag/file.go rename to vendor/github.com/go-openapi/swag/fileutils/file.go index 16accc55f8..b17eaba58f 100644 --- a/vendor/github.com/go-openapi/swag/file.go +++ b/vendor/github.com/go-openapi/swag/fileutils/file.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package swag +package fileutils import "mime/multipart" diff --git a/vendor/github.com/go-openapi/swag/path.go b/vendor/github.com/go-openapi/swag/fileutils/path.go similarity index 80% rename from vendor/github.com/go-openapi/swag/path.go rename to vendor/github.com/go-openapi/swag/fileutils/path.go index 941bd0176b..0de77e12d4 100644 --- a/vendor/github.com/go-openapi/swag/path.go +++ b/vendor/github.com/go-openapi/swag/fileutils/path.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package swag +package fileutils import ( "os" @@ -21,10 +21,8 @@ import ( "strings" ) -const ( - // GOPATHKey represents the env key for gopath - GOPATHKey = "GOPATH" -) +// GOPATHKey represents the env key for gopath +const GOPATHKey = "GOPATH" // FindInSearchPath finds a package in a provided lists of paths func FindInSearchPath(searchPath, pkg string) string { @@ -40,11 +38,17 @@ func FindInSearchPath(searchPath, pkg string) string { } // FindInGoSearchPath finds a package in the $GOPATH:$GOROOT +// +// Deprecated: this function is no longer relevant with modern go. +// It uses [runtime.GOROOT] under the hood, which is deprecated as of go1.24. func FindInGoSearchPath(pkg string) string { return FindInSearchPath(FullGoSearchPath(), pkg) } // FullGoSearchPath gets the search paths for finding packages +// +// Deprecated: this function is no longer relevant with modern go. +// It uses [runtime.GOROOT] under the hood, which is deprecated as of go1.24. func FullGoSearchPath() string { allPaths := os.Getenv(GOPATHKey) if allPaths == "" { diff --git a/vendor/github.com/go-openapi/swag/fileutils_iface.go b/vendor/github.com/go-openapi/swag/fileutils_iface.go new file mode 100644 index 0000000000..0c639e8c16 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/fileutils_iface.go @@ -0,0 +1,44 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "github.com/go-openapi/swag/fileutils" + +// GOPATHKey represents the env key for gopath +// +// Deprecated: use [fileutils.GOPATHKey] instead. +const GOPATHKey = fileutils.GOPATHKey + +// File represents an uploaded file. +// +// Deprecated: use [fileutils.File] instead. +type File = fileutils.File + +// FindInSearchPath finds a package in a provided lists of paths. +// +// Deprecated: use [fileutils.FindInSearchPath] instead. +func FindInSearchPath(searchPath, pkg string) string { + return fileutils.FindInSearchPath(searchPath, pkg) +} + +// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT +// +// Deprecated: use [fileutils.FindInGoSearchPath] instead. +func FindInGoSearchPath(pkg string) string { return fileutils.FindInGoSearchPath(pkg) } + +// FullGoSearchPath gets the search paths for finding packages +// +// Deprecated: use [fileutils.FullGoSearchPath] instead. +func FullGoSearchPath() string { return fileutils.FullGoSearchPath() } diff --git a/vendor/github.com/go-openapi/swag/go.work b/vendor/github.com/go-openapi/swag/go.work new file mode 100644 index 0000000000..1e537f0749 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/go.work @@ -0,0 +1,20 @@ +use ( + . 
+ ./cmdutils + ./conv + ./fileutils + ./jsonname + ./jsonutils + ./jsonutils/adapters/easyjson + ./jsonutils/adapters/testintegration + ./jsonutils/adapters/testintegration/benchmarks + ./jsonutils/fixtures_test + ./loading + ./mangling + ./netutils + ./stringutils + ./typeutils + ./yamlutils +) + +go 1.24.0 diff --git a/vendor/github.com/go-openapi/swag/go.work.sum b/vendor/github.com/go-openapi/swag/go.work.sum new file mode 100644 index 0000000000..bee4481a70 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/go.work.sum @@ -0,0 +1,4 @@ +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go deleted file mode 100644 index 20a359bb60..0000000000 --- a/vendor/github.com/go-openapi/swag/initialism_index.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "sort" - "strings" - "sync" -) - -var ( - // commonInitialisms are common acronyms that are kept as whole uppercased words. 
- commonInitialisms *indexOfInitialisms - - // initialisms is a slice of sorted initialisms - initialisms []string - - // a copy of initialisms pre-baked as []rune - initialismsRunes [][]rune - initialismsUpperCased [][]rune - - isInitialism func(string) bool - - maxAllocMatches int -) - -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - configuredInitialisms := map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, - } - - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - initialismsRunes = asRunes(initialisms) - initialismsUpperCased = asUpperCased(initialisms) - maxAllocMatches = maxAllocHeuristic(initialismsRunes) - - // a test function - isInitialism = commonInitialisms.isInitialism -} - -func asRunes(in []string) [][]rune { - out := make([][]rune, len(in)) - for i, initialism := range in { - out[i] = []rune(initialism) - } - - return out -} - -func asUpperCased(in []string) [][]rune { - out := make([][]rune, len(in)) - - for i, initialism := range in { - out[i] = []rune(upper(trim(initialism))) - } - - return out -} - -func maxAllocHeuristic(in [][]rune) int { - heuristic := make(map[rune]int) - for _, initialism := range in { - heuristic[initialism[0]]++ - } - - var maxAlloc int - for _, val := range heuristic { - if val > maxAlloc { - maxAlloc = val - } - } - - return maxAlloc -} - -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - // commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() - initialismsRunes = asRunes(initialisms) - initialismsUpperCased = asUpperCased(initialisms) -} - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Since go1.9, this may be implemented with sync.Map. 
-type indexOfInitialisms struct { - sortMutex *sync.Mutex - index *sync.Map -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - sortMutex: new(sync.Mutex), - index: new(sync.Map), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - for k, v := range initial { - m.index.Store(k, v) - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - _, ok := m.index.Load(key) - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.index.Store(key, true) - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - m.index.Range(func(key, _ interface{}) bool { - k := key.(string) - result = append(result, k) - return true - }) - sort.Sort(sort.Reverse(byInitialism(result))) - return -} - -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go deleted file mode 100644 index 7e9902ca31..0000000000 --- a/vendor/github.com/go-openapi/swag/json.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "bytes" - "encoding/json" - "log" - "reflect" - "strings" - "sync" - - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" -) - -// nullJSON represents a JSON object with null type -var nullJSON = []byte("null") - -// DefaultJSONNameProvider the default cache for types -var DefaultJSONNameProvider = NewNameProvider() - -const comma = byte(',') - -var closers map[byte]byte - -func init() { - closers = map[byte]byte{ - '{': '}', - '[': ']', - } -} - -type ejMarshaler interface { - MarshalEasyJSON(w *jwriter.Writer) -} - -type ejUnmarshaler interface { - UnmarshalEasyJSON(w *jlexer.Lexer) -} - -// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaler -// so it takes the fastest option available. 
-func WriteJSON(data interface{}) ([]byte, error) { - if d, ok := data.(ejMarshaler); ok { - jw := new(jwriter.Writer) - d.MarshalEasyJSON(jw) - return jw.BuildBytes() - } - if d, ok := data.(json.Marshaler); ok { - return d.MarshalJSON() - } - return json.Marshal(data) -} - -// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaler -// so it takes the fastest option available -func ReadJSON(data []byte, value interface{}) error { - trimmedData := bytes.Trim(data, "\x00") - if d, ok := value.(ejUnmarshaler); ok { - jl := &jlexer.Lexer{Data: trimmedData} - d.UnmarshalEasyJSON(jl) - return jl.Error() - } - if d, ok := value.(json.Unmarshaler); ok { - return d.UnmarshalJSON(trimmedData) - } - return json.Unmarshal(trimmedData, value) -} - -// DynamicJSONToStruct converts an untyped json structure into a struct -func DynamicJSONToStruct(data interface{}, target interface{}) error { - // TODO: convert straight to a json typed map (mergo + iterate?) - b, err := WriteJSON(data) - if err != nil { - return err - } - return ReadJSON(b, target) -} - -// ConcatJSON concatenates multiple json objects efficiently -func ConcatJSON(blobs ...[]byte) []byte { - if len(blobs) == 0 { - return nil - } - - last := len(blobs) - 1 - for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) { - // strips trailing null objects - last-- - if last < 0 { - // there was nothing but "null"s or nil... - return nil - } - } - if last == 0 { - return blobs[0] - } - - var opening, closing byte - var idx, a int - buf := bytes.NewBuffer(nil) - - for i, b := range blobs[:last+1] { - if b == nil || bytes.Equal(b, nullJSON) { - // a null object is in the list: skip it - continue - } - if len(b) > 0 && opening == 0 { // is this an array or an object? - opening, closing = b[0], closers[b[0]] - } - - if opening != '{' && opening != '[' { - continue // don't know how to concatenate non container objects - } - - if len(b) < 3 { // yep empty but also the last one, so closing this thing - if i == last && a > 0 { - if err := buf.WriteByte(closing); err != nil { - log.Println(err) - } - } - continue - } - - idx = 0 - if a > 0 { // we need to join with a comma for everything beyond the first non-empty item - if err := buf.WriteByte(comma); err != nil { - log.Println(err) - } - idx = 1 // this is not the first or the last so we want to drop the leading bracket - } - - if i != last { // not the last one, strip brackets - if _, err := buf.Write(b[idx : len(b)-1]); err != nil { - log.Println(err) - } - } else { // last one, strip only the leading bracket - if _, err := buf.Write(b[idx:]); err != nil { - log.Println(err) - } - } - a++ - } - // somehow it ended up being empty, so provide a default value - if buf.Len() == 0 { - if err := buf.WriteByte(opening); err != nil { - log.Println(err) - } - if err := buf.WriteByte(closing); err != nil { - log.Println(err) - } - } - return buf.Bytes() -} - -// ToDynamicJSON turns an object into a properly JSON typed structure -func ToDynamicJSON(data interface{}) interface{} { - // TODO: convert straight to a json typed map (mergo + iterate?) 
- b, err := json.Marshal(data) - if err != nil { - log.Println(err) - } - var res interface{} - if err := json.Unmarshal(b, &res); err != nil { - log.Println(err) - } - return res -} - -// FromDynamicJSON turns an object into a properly JSON typed structure -func FromDynamicJSON(data, target interface{}) error { - b, err := json.Marshal(data) - if err != nil { - log.Println(err) - } - return json.Unmarshal(b, target) -} - -// NameProvider represents an object capable of translating from go property names -// to json property names -// This type is thread-safe. -type NameProvider struct { - lock *sync.Mutex - index map[reflect.Type]nameIndex -} - -type nameIndex struct { - jsonNames map[string]string - goNames map[string]string -} - -// NewNameProvider creates a new name provider -func NewNameProvider() *NameProvider { - return &NameProvider{ - lock: &sync.Mutex{}, - index: make(map[reflect.Type]nameIndex), - } -} - -func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) { - for i := 0; i < tpe.NumField(); i++ { - targetDes := tpe.Field(i) - - if targetDes.PkgPath != "" { // unexported - continue - } - - if targetDes.Anonymous { // walk embedded structures tree down first - buildnameIndex(targetDes.Type, idx, reverseIdx) - continue - } - - if tag := targetDes.Tag.Get("json"); tag != "" { - - parts := strings.Split(tag, ",") - if len(parts) == 0 { - continue - } - - nm := parts[0] - if nm == "-" { - continue - } - if nm == "" { // empty string means we want to use the Go name - nm = targetDes.Name - } - - idx[nm] = targetDes.Name - reverseIdx[targetDes.Name] = nm - } - } -} - -func newNameIndex(tpe reflect.Type) nameIndex { - var idx = make(map[string]string, tpe.NumField()) - var reverseIdx = make(map[string]string, tpe.NumField()) - - buildnameIndex(tpe, idx, reverseIdx) - return nameIndex{jsonNames: idx, goNames: reverseIdx} -} - -// GetJSONNames gets all the json property names for a type -func (n *NameProvider) GetJSONNames(subject interface{}) []string { - n.lock.Lock() - defer n.lock.Unlock() - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - names, ok := n.index[tpe] - if !ok { - names = n.makeNameIndex(tpe) - } - - res := make([]string, 0, len(names.jsonNames)) - for k := range names.jsonNames { - res = append(res, k) - } - return res -} - -// GetJSONName gets the json name for a go property name -func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) { - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - return n.GetJSONNameForType(tpe, name) -} - -// GetJSONNameForType gets the json name for a go property name on a given type -func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { - n.lock.Lock() - defer n.lock.Unlock() - names, ok := n.index[tpe] - if !ok { - names = n.makeNameIndex(tpe) - } - nme, ok := names.goNames[name] - return nme, ok -} - -func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { - names := newNameIndex(tpe) - n.index[tpe] = names - return names -} - -// GetGoName gets the go name for a json property name -func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) { - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - return n.GetGoNameForType(tpe, name) -} - -// GetGoNameForType gets the go name for a given type for a json property name -func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { - n.lock.Lock() - defer n.lock.Unlock() - names, ok := n.index[tpe] - if !ok { - names = 
n.makeNameIndex(tpe) - } - nme, ok := names.jsonNames[name] - return nme, ok -} diff --git a/vendor/github.com/go-openapi/swag/jsonname/LICENSE b/vendor/github.com/go-openapi/swag/jsonname/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonname/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/jsonname/doc.go b/vendor/github.com/go-openapi/swag/jsonname/doc.go new file mode 100644 index 0000000000..b2e0c80fc3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonname/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jsonname is a provider of json property names from go properties. +package jsonname diff --git a/vendor/github.com/go-openapi/swag/jsonname/name_provider.go b/vendor/github.com/go-openapi/swag/jsonname/name_provider.go new file mode 100644 index 0000000000..e87aac2f78 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonname/name_provider.go @@ -0,0 +1,149 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonname + +import ( + "reflect" + "strings" + "sync" +) + +// DefaultJSONNameProvider is the default cache for types. +var DefaultJSONNameProvider = NewNameProvider() + +// NameProvider represents an object capable of translating from go property names +// to json property names. +// +// This type is thread-safe. +// +// See [github.com/go-openapi/jsonpointer.Pointer] for an example. 
+type NameProvider struct { + lock *sync.Mutex + index map[reflect.Type]nameIndex +} + +type nameIndex struct { + jsonNames map[string]string + goNames map[string]string +} + +// NewNameProvider creates a new name provider +func NewNameProvider() *NameProvider { + return &NameProvider{ + lock: &sync.Mutex{}, + index: make(map[reflect.Type]nameIndex), + } +} + +func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) { + for i := 0; i < tpe.NumField(); i++ { + targetDes := tpe.Field(i) + + if targetDes.PkgPath != "" { // unexported + continue + } + + if targetDes.Anonymous { // walk embedded structures tree down first + buildnameIndex(targetDes.Type, idx, reverseIdx) + continue + } + + if tag := targetDes.Tag.Get("json"); tag != "" { + + parts := strings.Split(tag, ",") + if len(parts) == 0 { + continue + } + + nm := parts[0] + if nm == "-" { + continue + } + if nm == "" { // empty string means we want to use the Go name + nm = targetDes.Name + } + + idx[nm] = targetDes.Name + reverseIdx[targetDes.Name] = nm + } + } +} + +func newNameIndex(tpe reflect.Type) nameIndex { + var idx = make(map[string]string, tpe.NumField()) + var reverseIdx = make(map[string]string, tpe.NumField()) + + buildnameIndex(tpe, idx, reverseIdx) + return nameIndex{jsonNames: idx, goNames: reverseIdx} +} + +// GetJSONNames gets all the json property names for a type +func (n *NameProvider) GetJSONNames(subject interface{}) []string { + n.lock.Lock() + defer n.lock.Unlock() + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + + res := make([]string, 0, len(names.jsonNames)) + for k := range names.jsonNames { + res = append(res, k) + } + return res +} + +// GetJSONName gets the json name for a go property name +func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetJSONNameForType(tpe, name) +} + +// GetJSONNameForType gets the json name for a go property name on a given type +func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.goNames[name] + return nme, ok +} + +// GetGoName gets the go name for a json property name +func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetGoNameForType(tpe, name) +} + +// GetGoNameForType gets the go name for a given type for a json property name +func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.jsonNames[name] + return nme, ok +} + +func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { + names := newNameIndex(tpe) + n.index[tpe] = names + return names +} diff --git a/vendor/github.com/go-openapi/swag/jsonname_iface.go b/vendor/github.com/go-openapi/swag/jsonname_iface.go new file mode 100644 index 0000000000..555369d75a --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonname_iface.go @@ -0,0 +1,35 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "github.com/go-openapi/swag/jsonname" +) + +// DefaultJSONNameProvider is the default cache for types +// +// Deprecated: use [jsonname.DefaultJSONNameProvider] instead. +var DefaultJSONNameProvider = jsonname.DefaultJSONNameProvider + +// NameProvider represents an object capable of translating from go property names +// to json property names. +// +// Deprecated: use [jsonname.NameProvider] instead. +type NameProvider = jsonname.NameProvider + +// NewNameProvider creates a new name provider +// +// Deprecated: use [jsonname.NewNameProvider] instead. +func NewNameProvider() *NameProvider { return jsonname.NewNameProvider() } diff --git a/vendor/github.com/go-openapi/swag/jsonutils/LICENSE b/vendor/github.com/go-openapi/swag/jsonutils/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/jsonutils/README.md b/vendor/github.com/go-openapi/swag/jsonutils/README.md new file mode 100644 index 0000000000..c8d0cab67b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/README.md @@ -0,0 +1,108 @@ + # jsonutils + +`jsonutils` exposes a few tools to work with JSON: + +- a fast, simple `Concat` to concatenate (not merge) JSON objects and arrays +- `FromDynamicJSON` to convert a data structure into a "dynamic JSON" data structure +- `ReadJSON` and `WriteJSON` behave like `json.Unmarshal` and `json.Marshal`, + with the ability to use another underlying serialization library through an `Adapter` + configured at runtime +- a `JSONMapSlice` structure that may be used to store JSON objects with the order of keys maintained + +## Dynamic JSON + +We call "dynamic JSON" the go data structure that results from unmarshaling JSON like this: + +```go + var value any + jsonBytes := `{"a": 1, ... 
}` + _ = json.Unmarshal([]byte(jsonBytes), &value) +``` + +In this configuration, the standard library mappings are as follows: + +| JSON | go | +|-----------|------------------| +| `number` | `float64` | +| `string` | `string` | +| `boolean` | `bool` | +| `null` | `nil` | +| `object` | `map[string]any` | +| `array` | `[]any` | + +## Map slices + +When using `JSONMapSlice`, the ordering of keys is ensured by replacing +mappings to `map[string]any` by a `JSONMapSlice` which is an (ordered) +slice of `JSONMapItem`s. + +Notice that a similar feature is available for YAML (see [`yamlutils`](../yamlutils)), +with a `YAMLMapSlice` type based on the `JSONMapSlice`. + +`JSONMapSlice` is similar to an ordered map, but the keys are not retrieved +in constant time. + +Another difference from the above standard mappings is that numbers don't always map +to a `float64`: if the value is a JSON integer, it unmarshals to `int64`. + +See also [some examples](https://pkg.go.dev/github.com/go-openapi/swag/jsonutils#pkg-examples). + +## Adapters + +`ReadJSON`, `WriteJSON` and `FromDynamicJSON` (which is a combination of the latter two) +are wrappers on top of `json.Unmarshal` and `json.Marshal`. + +By default, the adapter merely wraps the standard library. + +The adapter may be used to register other JSON serialization libraries, +possibly several ones at the same time. + +If the value passed is identified as an "ordered map" (i.e. implements `ifaces.Ordered` +or `ifaces.SetOrdered`), the adapter favors the "ordered" JSON behavior and tries to +find a registered implementation that supports ordered keys in objects. + +Our standard library implementation supports this. + +As of `v0.25.0`, we support through such an adapter the popular `mailru/easyjson` +library, which kicks in when the passed values support the `easyjson.Unmarshaler` +or `easyjson.Marshaler` interfaces. + +In the future, we plan to add support for more of the libraries competing on the Go JSON +serializer scene. + +## Registering an adapter + +In package `github.com/go-openapi/swag/jsonutils/adapters`, several adapters are available. + +Each adapter is an independent Go module, so you pick up its dependencies only if you import it. + +At this moment we provide: +* `stdlib`: JSON adapter based on the standard library +* `easyjson`: JSON adapter based on `github.com/mailru/easyjson` + +The adapters provide the basic `Marshal` and `Unmarshal` capabilities, plus an implementation +of the `MapSlice` pattern. + +You may also build your own adapter based on your specific use case. An adapter is not required to implement +all capabilities. + +Every adapter comes with a `Register` function, possibly with some options, to register the adapter +to a global registry. + +For example, to enable `easyjson` to be used in `ReadJSON` and `WriteJSON`, you would write something like: + +```go + import ( + "github.com/go-openapi/swag/jsonutils/adapters" + easyjson "github.com/go-openapi/swag/jsonutils/adapters/easyjson/json" + ) + + func init() { + easyjson.Register(adapters.Registry) + } +``` + +You may register several adapters. In this case, capability matching is evaluated from the last registered +adapters (LIFO).
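+
+For a quick end-to-end feel of these helpers, here is a minimal round-trip sketch (added
+for illustration only, it is not part of the upstream README; it assumes nothing beyond
+`ReadJSON`, `WriteJSON` and `JSONMapSlice` as declared in this package):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/go-openapi/swag/jsonutils"
+)
+
+func main() {
+	data := []byte(`{"b": 1, "a": {"y": true, "x": null}}`)
+
+	// JSONMapSlice implements the Ordered/SetOrdered interfaces, so ReadJSON
+	// and WriteJSON route through an adapter that preserves the order of keys.
+	var obj jsonutils.JSONMapSlice
+	if err := jsonutils.ReadJSON(data, &obj); err != nil {
+		panic(err)
+	}
+
+	out, err := jsonutils.WriteJSON(obj)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(string(out)) // keys come back in their original order
+}
+```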
+ +## [Benchmarks](./adapters/testintegration/benchmarks/README.md) diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/doc.go new file mode 100644 index 0000000000..dbb38c2f0c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/doc.go @@ -0,0 +1,19 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package adapters exposes a registry of adapters to multiple +// JSON serialization libraries. +// +// All interfaces are defined in package [ifaces]. +package adapters diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/doc.go new file mode 100644 index 0000000000..49649859af --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/doc.go @@ -0,0 +1,2 @@ +// Package ifaces exposes all interfaces to work with adapters. +package ifaces diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/ifaces.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/ifaces.go new file mode 100644 index 0000000000..4927d872da --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/ifaces.go @@ -0,0 +1,81 @@ +package ifaces + +import ( + _ "encoding/json" // for documentation purposes + "iter" +) + +// Ordered knows how to iterate over the (key,value) pairs of a JSON object. +type Ordered interface { + OrderedItems() iter.Seq2[string, any] +} + +// SetOrdered knows how to append or update the keys of a JSON object, +// given an iterator over (key,value) pairs. +// +// If the provided iterator is nil then the receiver should be set to nil. +type SetOrdered interface { + SetOrderedItems(iter.Seq2[string, any]) +} + +// OrderedMap represents a JSON object (i.e. like a map[string]any), +// and knows how to serialize and deserialize JSON with the order of keys maintained. +type OrderedMap interface { + Ordered + SetOrdered + + OrderedMarshalJSON() ([]byte, error) + OrderedUnmarshalJSON([]byte) error +} + +// MarshalAdapter behaves like the standard library [json.Marshal]. +type MarshalAdapter interface { + Poolable + + Marshal(any) ([]byte, error) +} + +// OrderedMarshalAdapter behaves like the standard library [json.Marshal], preserving the order of keys in objects. +type OrderedMarshalAdapter interface { + Poolable + + OrderedMarshal(Ordered) ([]byte, error) +} + +// UnmarshalAdapter behaves like the standard library [json.Unmarshal]. +type UnmarshalAdapter interface { + Poolable + + Unmarshal([]byte, any) error +} + +// OrderedUnmarshalAdapter behaves like the standard library [json.Unmarshal], preserving the order of keys in objects. +type OrderedUnmarshalAdapter interface { + Poolable + + OrderedUnmarshal([]byte, SetOrdered) error +} + +// Adapter exposes an interface like the standard [json] library.
+type Adapter interface { + MarshalAdapter + UnmarshalAdapter + + OrderedAdapter +} + +// OrderedAdapter exposes interfaces to process JSON and keep the order of object keys. +type OrderedAdapter interface { + OrderedMarshalAdapter + OrderedUnmarshalAdapter + NewOrderedMap(capacity int) OrderedMap +} + +type Poolable interface { + // Self-redeem: for [Adapter] s that are allocated from a pool. + // The [Adapter] must not be used after calling [Redeem]. + Redeem() + + // Reset the state of the [Adapter], if any. + Reset() +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/registry_iface.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/registry_iface.go new file mode 100644 index 0000000000..d1fe6a0adb --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/registry_iface.go @@ -0,0 +1,88 @@ +package ifaces + +import ( + "strings" +) + +// Capability indicates what a JSON adapter is capable of. +type Capability uint8 + +const ( + CapabilityMarshalJSON Capability = 1 << iota + CapabilityUnmarshalJSON + CapabilityOrderedMarshalJSON + CapabilityOrderedUnmarshalJSON + CapabilityOrderedMap +) + +func (c Capability) String() string { + switch c { + case CapabilityMarshalJSON: + return "MarshalJSON" + case CapabilityUnmarshalJSON: + return "UnmarshalJSON" + case CapabilityOrderedMarshalJSON: + return "OrderedMarshalJSON" + case CapabilityOrderedUnmarshalJSON: + return "OrderedUnmarshalJSON" + case CapabilityOrderedMap: + return "OrderedMap" + default: + return "" + } +} + +// Capabilities holds several unitary capability flags +type Capabilities uint8 + +// Has some capability flag enabled. +func (c Capabilities) Has(capability Capability) bool { + return Capability(c)&capability > 0 +} + +func (c Capabilities) String() string { + var w strings.Builder + + first := true + for _, capability := range []Capability{ + CapabilityMarshalJSON, + CapabilityUnmarshalJSON, + CapabilityOrderedMarshalJSON, + CapabilityOrderedUnmarshalJSON, + CapabilityOrderedMap, + } { + if c.Has(capability) { + if !first { + w.WriteByte('|') + } else { + first = false + } + w.WriteString(capability.String()) + } + } + + return w.String() +} + +const ( + AllCapabilities Capabilities = Capabilities(uint8(CapabilityMarshalJSON) | + uint8(CapabilityUnmarshalJSON) | + uint8(CapabilityOrderedMarshalJSON) | + uint8(CapabilityOrderedUnmarshalJSON) | + uint8(CapabilityOrderedMap)) + + AllUnorderedCapabilities Capabilities = Capabilities(uint8(CapabilityMarshalJSON) | uint8(CapabilityUnmarshalJSON)) +) + +// RegistryEntry describes how any given adapter registers its capabilities to the [Registrar]. +type RegistryEntry struct { + Who string + What Capabilities + Constructor func() Adapter + Support func(what Capability, value any) bool +} + +// Registrar is a type that knows how to keep registration calls from adapters. +type Registrar interface { + RegisterFor(RegistryEntry) +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/registry.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/registry.go new file mode 100644 index 0000000000..b34a230518 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/registry.go @@ -0,0 +1,240 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapters + +import ( + "fmt" + "reflect" + "slices" + "sync" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" + stdlib "github.com/go-openapi/swag/jsonutils/adapters/stdlib/json" +) + +// Registry holds the global registry for registered adapters. +var Registry = NewRegistrar() + +var ( + defaultRegistered = stdlib.Register + + _ ifaces.Registrar = &Registrar{} +) + +type registryError string + +func (e registryError) Error() string { + return string(e) +} + +// ErrRegistry indicates an error returned by the [Registrar]. +var ErrRegistry registryError = "JSON adapters registry error" + +type registry []*ifaces.RegistryEntry + +// Registrar holds registered [ifaces.Adapters] for different serialization capabilities. +// +// Internally, it maintains a cache for data types that favor a given adapter. +type Registrar struct { + marshalerRegistry registry + unmarshalerRegistry registry + orderedMarshalerRegistry registry + orderedUnmarshalerRegistry registry + orderedMapRegistry registry + + gmx sync.RWMutex + + // cache indexed by value type, so we don't have to lookup + marshalerCache map[reflect.Type]*ifaces.RegistryEntry + unmarshalerCache map[reflect.Type]*ifaces.RegistryEntry + orderedMarshalerCache map[reflect.Type]*ifaces.RegistryEntry + orderedUnmarshalerCache map[reflect.Type]*ifaces.RegistryEntry + orderedMapCache map[reflect.Type]*ifaces.RegistryEntry +} + +func NewRegistrar() *Registrar { + r := &Registrar{} + + r.marshalerRegistry = make(registry, 0, 1) + r.unmarshalerRegistry = make(registry, 0, 1) + r.orderedMarshalerRegistry = make(registry, 0, 1) + r.orderedUnmarshalerRegistry = make(registry, 0, 1) + r.orderedMapRegistry = make(registry, 0, 1) + + r.marshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry) + r.unmarshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry) + r.orderedMarshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry) + r.orderedUnmarshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry) + r.orderedMapCache = make(map[reflect.Type]*ifaces.RegistryEntry) + + defaultRegistered(r) + + return r +} + +// ClearCache resets the internal type cache. +func (r *Registrar) ClearCache() { + r.gmx.Lock() + r.clearCache() + r.gmx.Unlock() +} + +// Reset the [Registrar] to its defaults. +func (r *Registrar) Reset() { + r.gmx.Lock() + r.clearCache() + r.marshalerRegistry = r.marshalerRegistry[:0] + r.unmarshalerRegistry = r.unmarshalerRegistry[:0] + r.orderedMarshalerRegistry = r.orderedMarshalerRegistry[:0] + r.orderedUnmarshalerRegistry = r.orderedUnmarshalerRegistry[:0] + r.orderedMapRegistry = r.orderedMapRegistry[:0] + r.gmx.Unlock() + + defaultRegistered(r) +} + +// RegisterFor registers an adapter for some JSON capabilities. 
+func (r *Registrar) RegisterFor(entry ifaces.RegistryEntry) { + r.gmx.Lock() + if entry.What.Has(ifaces.CapabilityMarshalJSON) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityMarshalJSON) + r.marshalerRegistry = slices.Insert(r.marshalerRegistry, 0, &e) + } + if entry.What.Has(ifaces.CapabilityUnmarshalJSON) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityUnmarshalJSON) + r.unmarshalerRegistry = slices.Insert(r.unmarshalerRegistry, 0, &e) + } + if entry.What.Has(ifaces.CapabilityOrderedMarshalJSON) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityOrderedMarshalJSON) + r.orderedMarshalerRegistry = slices.Insert(r.orderedMarshalerRegistry, 0, &e) + } + if entry.What.Has(ifaces.CapabilityOrderedUnmarshalJSON) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityOrderedUnmarshalJSON) + r.orderedUnmarshalerRegistry = slices.Insert(r.orderedUnmarshalerRegistry, 0, &e) + } + if entry.What.Has(ifaces.CapabilityOrderedMap) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityOrderedMap) + r.orderedMapRegistry = slices.Insert(r.orderedMapRegistry, 0, &e) + } + r.gmx.Unlock() +} + +// AdapterFor returns an [ifaces.Adapter] that supports this capability for this type of value. +// +// The [ifaces.Adapter] may be redeemed to its pool using its Redeem() method, for adapters that support global +// pooling. When this is not the case, the redeem function is just a no-operation. +func (r *Registrar) AdapterFor(capability ifaces.Capability, value any) ifaces.Adapter { + entry := r.findFirstFor(capability, value) + if entry == nil { + return nil + } + + return entry.Constructor() +} + +func (r *Registrar) clearCache() { + clear(r.marshalerCache) + clear(r.unmarshalerCache) + clear(r.orderedMarshalerCache) + clear(r.orderedUnmarshalerCache) + clear(r.orderedMapCache) +} + +func (r *Registrar) findFirstFor(capability ifaces.Capability, value any) *ifaces.RegistryEntry { + switch capability { + case ifaces.CapabilityMarshalJSON: + return r.findFirstInRegistryFor(r.marshalerRegistry, r.marshalerCache, capability, value) + case ifaces.CapabilityUnmarshalJSON: + return r.findFirstInRegistryFor(r.unmarshalerRegistry, r.unmarshalerCache, capability, value) + case ifaces.CapabilityOrderedMarshalJSON: + return r.findFirstInRegistryFor(r.orderedMarshalerRegistry, r.orderedMarshalerCache, capability, value) + case ifaces.CapabilityOrderedUnmarshalJSON: + return r.findFirstInRegistryFor(r.orderedUnmarshalerRegistry, r.orderedUnmarshalerCache, capability, value) + case ifaces.CapabilityOrderedMap: + return r.findFirstInRegistryFor(r.orderedMapRegistry, r.orderedMapCache, capability, value) + default: + panic(fmt.Errorf("unsupported capability %d: %w", capability, ErrRegistry)) + } +} + +func (r *Registrar) findFirstInRegistryFor(reg registry, cache map[reflect.Type]*ifaces.RegistryEntry, capability ifaces.Capability, value any) *ifaces.RegistryEntry { + r.gmx.RLock() + if len(reg) > 1 { + if entry, ok := cache[reflect.TypeOf(value)]; ok { + // cache hit + r.gmx.RUnlock() + return entry + } + } + + for _, entry := range reg { + if !entry.Support(capability, value) { + continue + } + + r.gmx.RUnlock() + + // update the internal cache + r.gmx.Lock() + cache[reflect.TypeOf(value)] = entry + r.gmx.Unlock() + + return entry + } + + // no adapter found + r.gmx.RUnlock() + + return nil +} + +// MarshalAdapterFor returns the first adapter that knows how to Marshal this type of value. 
+func MarshalAdapterFor(value any) ifaces.MarshalAdapter { + return Registry.AdapterFor(ifaces.CapabilityMarshalJSON, value) +} + +// OrderedMarshalAdapterFor returns the first adapter that knows how to OrderedMarshal this type of value. +func OrderedMarshalAdapterFor(value ifaces.Ordered) ifaces.OrderedMarshalAdapter { + return Registry.AdapterFor(ifaces.CapabilityOrderedMarshalJSON, value) +} + +// UnmarshalAdapterFor returns the first adapter that knows how to Unmarshal this type of value. +func UnmarshalAdapterFor(value any) ifaces.UnmarshalAdapter { + return Registry.AdapterFor(ifaces.CapabilityUnmarshalJSON, value) +} + +// OrderedUnmarshalAdapterFor provides the first adapter that knows how to OrderedUnmarshal this type of value. +func OrderedUnmarshalAdapterFor(value ifaces.SetOrdered) ifaces.OrderedUnmarshalAdapter { + return Registry.AdapterFor(ifaces.CapabilityOrderedUnmarshalJSON, value) +} + +// NewOrderedMap provides the "ordered map" implementation provided by the registry. +func NewOrderedMap(capacity int) ifaces.OrderedMap { + var v any + adapter := Registry.AdapterFor(ifaces.CapabilityOrderedUnmarshalJSON, v) + if adapter == nil { + return nil + } + + defer adapter.Redeem() + return adapter.NewOrderedMap(capacity) +} + +func noopRedeemer() {} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/adapter.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/adapter.go new file mode 100644 index 0000000000..4df831b62d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/adapter.go @@ -0,0 +1,126 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package json + +import ( + stdjson "encoding/json" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" + "github.com/go-openapi/swag/typeutils" +) + +const sensibleBufferSize = 8192 + +type jsonError string + +func (e jsonError) Error() string { + return string(e) +} + +// ErrStdlib indicates that an error comes from the stdlib JSON adapter +var ErrStdlib jsonError = "error from the JSON adapter stdlib" + +var _ ifaces.Adapter = &Adapter{} + +type Adapter struct { +} + +// NewAdapter yields an [ifaces.Adapter] using the standard library. 
+func NewAdapter() *Adapter { + return &Adapter{} +} + +func (a *Adapter) Marshal(value any) ([]byte, error) { + return stdjson.Marshal(value) +} + +func (a *Adapter) Unmarshal(data []byte, value any) error { + return stdjson.Unmarshal(data, value) +} + +func (a *Adapter) OrderedMarshal(value ifaces.Ordered) ([]byte, error) { + w := poolOfWriters.Borrow() + defer func() { + poolOfWriters.Redeem(w) + }() + + if typeutils.IsNil(value) { + w.RawString("null") + + return w.BuildBytes() + } + + w.RawByte('{') + first := true + for k, v := range value.OrderedItems() { + if first { + first = false + } else { + w.RawByte(',') + } + + w.String(k) + w.RawByte(':') + + switch val := v.(type) { + case ifaces.Ordered: + w.Raw(a.OrderedMarshal(val)) + default: + w.Raw(stdjson.Marshal(v)) + } + } + + w.RawByte('}') + + return w.BuildBytes() +} + +func (a *Adapter) OrderedUnmarshal(data []byte, value ifaces.SetOrdered) error { + var m MapSlice + if err := m.OrderedUnmarshalJSON(data); err != nil { + return err + } + + if typeutils.IsNil(m) { + // force input value to nil + value.SetOrderedItems(nil) + + return nil + } + + value.SetOrderedItems(m.OrderedItems()) + + return nil +} + +func (a *Adapter) NewOrderedMap(capacity int) ifaces.OrderedMap { + m := make(MapSlice, 0, capacity) + + return &m +} + +// Redeem the [Adapter] when it comes from a pool. +// +// The adapter becomes immediately unusable once redeemed. +func (a *Adapter) Redeem() { + if a == nil { + return + } + + RedeemAdapter(a) +} + +func (a *Adapter) Reset() { +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/doc.go new file mode 100644 index 0000000000..2ff6b212fc --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package json implements an [ifaces.Adapter] using the standard library. +package json diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/lexer.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/lexer.go new file mode 100644 index 0000000000..6d919199d0 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/lexer.go @@ -0,0 +1,331 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package json + +import ( + stdjson "encoding/json" + "errors" + "fmt" + "io" + "math" + "strconv" + + "github.com/go-openapi/swag/conv" +) + +type token struct { + stdjson.Token +} + +func (t token) String() string { + if t == invalidToken { + return "invalid token" + } + if t == eofToken { + return "EOF" + } + + return fmt.Sprintf("%v", t.Token) +} + +func (t token) Kind() tokenKind { + switch t.Token.(type) { + case nil: + return tokenNull + case stdjson.Delim: + return tokenDelim + case bool: + return tokenBool + case float64: + return tokenFloat + case stdjson.Number: + return tokenNumber + case string: + return tokenString + default: + return tokenUndef + } +} + +func (t token) Delim() byte { + r, ok := t.Token.(stdjson.Delim) + if !ok { + return 0 + } + + return byte(r) +} + +type tokenKind uint8 + +const ( + tokenUndef tokenKind = iota + tokenString + tokenNumber + tokenFloat + tokenBool + tokenNull + tokenDelim +) + +var ( + invalidToken = token{ + Token: stdjson.Token(struct{}{}), + } + + eofToken = token{ + Token: stdjson.Token(&struct{}{}), + } + + undefToken = token{ + Token: stdjson.Token(uint8(0)), + } +) + +// jlexer apes easyjson's jlexer, but uses the standard library decoder under the hood. +type jlexer struct { + buf *bytesReader + dec *stdjson.Decoder + err error + // current token + next token + // started bool +} + +type bytesReader struct { + buf []byte + offset int +} + +func (b *bytesReader) Reset() { + b.buf = nil + b.offset = 0 +} + +func (b *bytesReader) Read(p []byte) (int, error) { + if b.offset >= len(b.buf) { + return 0, io.EOF + } + + n := len(p) + buf := b.buf[b.offset:] + m := len(buf) + + if n >= m { + copy(p, buf) + b.offset += m + + return m, nil + } + + copy(p, buf[:n]) + b.offset += n + + return n, nil +} + +var _ io.Reader = &bytesReader{} + +func newLexer(data []byte) *jlexer { + l := &jlexer{ + // current: undefToken, + next: undefToken, + } + l.buf = &bytesReader{ + buf: data, + } + l.dec = stdjson.NewDecoder(l.buf) // unfortunately, cannot pool this + + return l +} + +func (l *jlexer) Reset() { + l.err = nil + l.next = undefToken + // leave l.dec and l.buf alone, since they are replaced at every Borrow +} + +func (l *jlexer) Error() error { + return l.err +} + +func (l *jlexer) SetErr(err error) { + l.err = err +} + +func (l *jlexer) Ok() bool { + return l.err == nil +} + +// NextToken consumes a token +func (l *jlexer) NextToken() token { + if !l.Ok() { + return invalidToken + } + + if l.next != undefToken { + next := l.next + l.next = undefToken + + return next + } + + return l.fetchToken() +} + +// PeekToken returns the next token without consuming it +func (l *jlexer) PeekToken() token { + if l.next == undefToken { + l.next = l.fetchToken() + } + + return l.next +} + +func (l *jlexer) Skip() { + _ = l.NextToken() +} + +func (l *jlexer) IsDelim(c byte) bool { + if !l.Ok() { + return false + } + + next := l.PeekToken() + if next.Kind() != tokenDelim { + return false + } + + if next.Delim() != c { + return false + } + + return true +} + +func (l *jlexer) IsNull() bool { + if !l.Ok() { + return false + } + + next := l.PeekToken() + + return next.Kind() == tokenNull +} + +func (l *jlexer) Delim(c byte) { + if !l.Ok() { + return + } + + tok := l.NextToken() + if tok.Kind() != tokenDelim { + l.err = fmt.Errorf("expected a delimiter token but got '%v': %w", tok, ErrStdlib) + + return + } + + if tok.Delim() != c { + l.err = fmt.Errorf("expected delimiter '%q' but got '%q': %w", c, tok.Delim(), ErrStdlib) + } +} + +func (l *jlexer) Null() { + if 
!l.Ok() { + return + } + + tok := l.NextToken() + if tok.Kind() != tokenNull { + l.err = fmt.Errorf("expected a null token but got '%v': %w", tok, ErrStdlib) + } +} + +func (l *jlexer) Number() any { + if !l.Ok() { + return 0 + } + + tok := l.NextToken() + + switch tok.Kind() { //nolint:exhaustive + case tokenNumber: + n := tok.Token.(stdjson.Number).String() + f, _ := strconv.ParseFloat(n, 64) + if conv.IsFloat64AJSONInteger(f) { + return int64(math.Trunc(f)) + } + + return f + + case tokenFloat: + f := tok.Token.(float64) + if conv.IsFloat64AJSONInteger(f) { + return int64(math.Trunc(f)) + } + + return f + + default: + l.err = fmt.Errorf("expected a number token but got '%v': %w", tok, ErrStdlib) + + return 0 + } +} + +func (l *jlexer) Bool() bool { + if !l.Ok() { + return false + } + + tok := l.NextToken() + if tok.Kind() != tokenBool { + l.err = fmt.Errorf("expected a bool token but got '%v': %w", tok, ErrStdlib) + + return false + } + + return tok.Token.(bool) +} + +func (l *jlexer) String() string { + if !l.Ok() { + return "" + } + + tok := l.NextToken() + if tok.Kind() != tokenString { + l.err = fmt.Errorf("expected a string token but got '%v': %w", tok, ErrStdlib) + + return "" + } + + return tok.Token.(string) +} + +// Commas and colons are elided. +func (l *jlexer) fetchToken() token { + jtok, err := l.dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + return eofToken + } + + l.err = errors.Join(err, ErrStdlib) + return invalidToken + } + + return token{Token: jtok} +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/ordered_map.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/ordered_map.go new file mode 100644 index 0000000000..18e6294e50 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/ordered_map.go @@ -0,0 +1,277 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package json + +import ( + stdjson "encoding/json" + "fmt" + "iter" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +var _ ifaces.OrderedMap = &MapSlice{} + +// MapSlice represents a JSON object, with the order of keys maintained. +type MapSlice []MapItem + +func (s MapSlice) OrderedItems() iter.Seq2[string, any] { + return func(yield func(string, any) bool) { + for _, item := range s { + if !yield(item.Key, item.Value) { + return + } + } + } +} + +func (s *MapSlice) SetOrderedItems(items iter.Seq2[string, any]) { + if items == nil { + *s = nil + + return + } + + m := *s + if len(m) > 0 { + // update mode + idx := make(map[string]int, len(m)) + + for i, item := range m { + idx[item.Key] = i + } + + for k, v := range items { + idx, ok := idx[k] + if ok { + m[idx].Value = v + + continue + } + m = append(m, MapItem{Key: k, Value: v}) + } + + *s = m + + return + } + + for k, v := range items { + m = append(m, MapItem{Key: k, Value: v}) + } + + *s = m +} + +// MarshalJSON renders a [MapSlice] as JSON bytes, preserving the order of keys. 
+func (s MapSlice) MarshalJSON() ([]byte, error) { + return s.OrderedMarshalJSON() +} + +func (s MapSlice) OrderedMarshalJSON() ([]byte, error) { + w := poolOfWriters.Borrow() + defer func() { + poolOfWriters.Redeem(w) + }() + + s.marshalObject(w) + + return w.BuildBytes() // this clones data, so it's okay to redeem the writer and its buffer +} + +// UnmarshalJSON builds a [MapSlice] from JSON bytes, preserving the order of keys. +// +// Inner objects are unmarshaled as [MapSlice] slices and not map[string]any. +func (s *MapSlice) UnmarshalJSON(data []byte) error { + return s.OrderedUnmarshalJSON(data) +} + +func (s *MapSlice) OrderedUnmarshalJSON(data []byte) error { + l := poolOfLexers.Borrow(data) + defer func() { + poolOfLexers.Redeem(l) + }() + + s.unmarshalObject(l) + + return l.Error() +} + +func (s MapSlice) marshalObject(w *jwriter) { + if s == nil { + w.RawString("null") + + return + } + + w.RawByte('{') + + if len(s) == 0 { + w.RawByte('}') + + return + } + + s[0].marshalJSON(w) + + for i := 1; i < len(s); i++ { + w.RawByte(',') + s[i].marshalJSON(w) + } + + w.RawByte('}') +} + +func (s *MapSlice) unmarshalObject(in *jlexer) { + if in.IsNull() { + in.Skip() + + return + } + + in.Delim('{') // consume token + if !in.Ok() { + return + } + + result := make(MapSlice, 0) + + for in.Ok() && !in.IsDelim('}') { + var mi MapItem + + mi.unmarshalKeyValue(in) + result = append(result, mi) + } + + in.Delim('}') + + if !in.Ok() { + return + } + + *s = result +} + +// MapItem represents the value of a key in a JSON object held by [MapSlice]. +// +// Notice that [MapItem] should not be marshaled to or unmarshaled from JSON directly, +// use this type as part of a [MapSlice] when dealing with JSON bytes. +type MapItem struct { + Key string + Value any +} + +func (s MapItem) marshalJSON(w *jwriter) { + w.String(s.Key) + w.RawByte(':') + w.Raw(stdjson.Marshal(s.Value)) +} + +func (s *MapItem) unmarshalKeyValue(in *jlexer) { + key := in.String() // consume string + value := s.asInterface(in) // consume any value, including termination tokens '}' or ']' + + if !in.Ok() { + return + } + + s.Key = key + s.Value = value +} + +func (s *MapItem) unmarshalArray(in *jlexer) []any { + if in.IsNull() { + in.Skip() + + return nil + } + + in.Delim('[') // consume token + if !in.Ok() { + return nil + } + + ret := make([]any, 0) + + for in.Ok() && !in.IsDelim(']') { + ret = append(ret, s.asInterface(in)) + } + + in.Delim(']') + if !in.Ok() { + return nil + } + + return ret +} + +// asInterface is very much like [jlexer.Lexer.Interface], but unmarshals an object +// into a [MapSlice], not a map[string]any. +// +// We have to force parsing errors somehow, since [jlexer.Lexer] doesn't let us +// set a parsing error directly. 
+func (s *MapItem) asInterface(in *jlexer) any { + if !in.Ok() { + return nil + } + + tok := in.PeekToken() // look-ahead what the next token looks like + kind := tok.Kind() + + switch kind { + case tokenString: + return in.String() // consume string + + case tokenNumber, tokenFloat: + return in.Number() + + case tokenBool: + return in.Bool() + + case tokenNull: + in.Null() + + return nil + + case tokenDelim: + switch tok.Delim() { + case '{': // not consumed yet + ret := make(MapSlice, 0) + ret.unmarshalObject(in) // consumes the terminating '}' + + if in.Ok() { + return ret + } + + // lexer is in an error state: will exhaust + return nil + + case '[': // not consumed yet + return s.unmarshalArray(in) // consumes the terminating ']' + default: + in.SetErr(fmt.Errorf("unexpected delimiter: %v: %w", tok, ErrStdlib)) // force error + return nil + } + + case tokenUndef: + fallthrough + default: + if in.Ok() { + in.SetErr(fmt.Errorf("unexpected token: %v: %w", tok, ErrStdlib)) // force error + } + + return nil + } +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/pool.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/pool.go new file mode 100644 index 0000000000..0f51d3a20c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/pool.go @@ -0,0 +1,154 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package json + +import ( + "encoding/json" + "sync" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +type adaptersPool struct { + sync.Pool +} + +func (p *adaptersPool) Borrow() *Adapter { + return p.Get().(*Adapter) +} + +func (p *adaptersPool) BorrowIface() ifaces.Adapter { + return p.Get().(*Adapter) +} + +func (p *adaptersPool) Redeem(a *Adapter) { + p.Put(a) +} + +type writersPool struct { + sync.Pool +} + +func (p *writersPool) Borrow() *jwriter { + ptr := p.Get() + + jw := ptr.(*jwriter) + jw.Reset() + + return jw +} + +func (p *writersPool) Redeem(w *jwriter) { + p.Put(w) +} + +type lexersPool struct { + sync.Pool +} + +func (p *lexersPool) Borrow(data []byte) *jlexer { + ptr := p.Get() + + l := ptr.(*jlexer) + l.buf = poolOfReaders.Borrow(data) + l.dec = json.NewDecoder(l.buf) // cannot pool, not exposed by the encoding/json API + l.Reset() + + return l +} + +func (p *lexersPool) Redeem(l *jlexer) { + l.dec = nil + discard := l.buf + l.buf = nil + poolOfReaders.Redeem(discard) + p.Put(l) +} + +type readersPool struct { + sync.Pool +} + +func (p *readersPool) Borrow(data []byte) *bytesReader { + ptr := p.Get() + + b := ptr.(*bytesReader) + b.Reset() + b.buf = data + + return b +} + +func (p *readersPool) Redeem(b *bytesReader) { + p.Put(b) +} + +var ( + poolOfAdapters = &adaptersPool{ + Pool: sync.Pool{ + New: func() any { + return NewAdapter() + }, + }, + } + + poolOfWriters = &writersPool{ + Pool: sync.Pool{ + New: func() any { + return newJWriter() + }, + }, + } + + poolOfLexers = &lexersPool{ + Pool: sync.Pool{ + New: func() any { + return newLexer(nil) + }, + }, + } + + poolOfReaders = &readersPool{ + Pool: sync.Pool{ + New: func() any { + return &bytesReader{} + }, + }, + } +) + +// BorrowAdapter borrows an [Adapter] from the pool, recycling already allocated instances. +func BorrowAdapter() *Adapter { + return poolOfAdapters.Borrow() +} + +// BorrowAdapterIface borrows a stdlib [Adapter] and converts it directly +// to [ifaces.Adapter]. This is useful to avoid further allocations when +// translating the concrete type into an interface. +func BorrowAdapterIface() ifaces.Adapter { + return poolOfAdapters.BorrowIface() +} + +// RedeemAdapter redeems an [Adapter] to the pool, so it may be recycled. +func RedeemAdapter(a *Adapter) { + poolOfAdapters.Redeem(a) +} + +func RedeemAdapterIface(a ifaces.Adapter) { + concrete, ok := a.(*Adapter) + if ok { + poolOfAdapters.Redeem(concrete) + } +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/register.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/register.go new file mode 100644 index 0000000000..18bbc37745 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/register.go @@ -0,0 +1,37 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package json + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +func Register(dispatcher ifaces.Registrar) { + t := reflect.TypeOf(Adapter{}) + dispatcher.RegisterFor( + ifaces.RegistryEntry{ + Who: fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()), + What: ifaces.AllCapabilities, + Constructor: BorrowAdapterIface, + Support: support, + }) +} + +func support(_ ifaces.Capability, _ any) bool { + return true +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/writer.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/writer.go new file mode 100644 index 0000000000..38e9b6e034 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/writer.go @@ -0,0 +1,86 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package json + +import ( + "bytes" + "encoding/json" + "strings" +) + +type jwriter struct { + buf *bytes.Buffer + err error +} + +func newJWriter() *jwriter { + buf := make([]byte, 0, sensibleBufferSize) + + return &jwriter{buf: bytes.NewBuffer(buf)} +} + +func (w *jwriter) Reset() { + w.buf.Reset() + w.err = nil +} + +func (w *jwriter) RawString(s string) { + if w.err != nil { + return + } + w.buf.WriteString(s) +} + +func (w *jwriter) Raw(b []byte, err error) { + if w.err != nil { + return + } + if err != nil { + w.err = err + return + } + + _, _ = w.buf.Write(b) +} + +func (w *jwriter) RawByte(c byte) { + if w.err != nil { + return + } + w.buf.WriteByte(c) +} + +var quoteReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`) + +func (w *jwriter) String(s string) { + if w.err != nil { + return + } + // escape quotes and \ + s = quoteReplacer.Replace(s) + + _ = w.buf.WriteByte('"') + json.HTMLEscape(w.buf, []byte(s)) + _ = w.buf.WriteByte('"') +} + +// BuildBytes returns a clone of the internal buffer. +func (w *jwriter) BuildBytes() ([]byte, error) { + if w.err != nil { + return nil, w.err + } + + return bytes.Clone(w.buf.Bytes()), nil +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/concat.go b/vendor/github.com/go-openapi/swag/jsonutils/concat.go new file mode 100644 index 0000000000..049d4698bf --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/concat.go @@ -0,0 +1,103 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jsonutils + +import ( + "bytes" +) + +// nullJSON represents a JSON object with null type +var nullJSON = []byte("null") + +const comma = byte(',') + +var closers map[byte]byte + +func init() { + closers = map[byte]byte{ + '{': '}', + '[': ']', + } +} + +// ConcatJSON concatenates multiple json objects or arrays efficiently. +// +// Note that [ConcatJSON] performs a very simmple (and fast) concatenation +// operation: it does not attempt to merge objects. +func ConcatJSON(blobs ...[]byte) []byte { + if len(blobs) == 0 { + return nil + } + + last := len(blobs) - 1 + for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) { + // strips trailing null objects + last-- + if last < 0 { + // there was nothing but "null"s or nil... + return nil + } + } + if last == 0 { + return blobs[0] + } + + var opening, closing byte + var idx, a int + buf := bytes.NewBuffer(nil) + + for i, b := range blobs[:last+1] { + if b == nil || bytes.Equal(b, nullJSON) { + // a null object is in the list: skip it + continue + } + if len(b) > 0 && opening == 0 { // is this an array or an object? + opening, closing = b[0], closers[b[0]] + } + + if opening != '{' && opening != '[' { + continue // don't know how to concatenate non container objects + } + + const minLengthIfNotEmpty = 3 + if len(b) < minLengthIfNotEmpty { // yep empty but also the last one, so closing this thing + if i == last && a > 0 { + _ = buf.WriteByte(closing) // never returns err != nil + } + continue + } + + idx = 0 + if a > 0 { // we need to join with a comma for everything beyond the first non-empty item + _ = buf.WriteByte(comma) // never returns err != nil + idx = 1 // this is not the first or the last so we want to drop the leading bracket + } + + if i != last { // not the last one, strip brackets + _, _ = buf.Write(b[idx : len(b)-1]) // never returns err != nil + } else { // last one, strip only the leading bracket + _, _ = buf.Write(b[idx:]) + } + a++ + } + + // somehow it ended up being empty, so provide a default value + if buf.Len() == 0 && (opening == '{' || opening == '[') { + _ = buf.WriteByte(opening) // never returns err != nil + _ = buf.WriteByte(closing) + } + + return buf.Bytes() +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/doc.go new file mode 100644 index 0000000000..495ef83413 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/doc.go @@ -0,0 +1,18 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jsonutils provides helpers to work with JSON. +// +// These utilities work with dynamic go structures to and from JSON. 
+package jsonutils diff --git a/vendor/github.com/go-openapi/swag/jsonutils/json.go b/vendor/github.com/go-openapi/swag/jsonutils/json.go new file mode 100644 index 0000000000..a33b89bd4d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/json.go @@ -0,0 +1,127 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonutils + +import ( + "bytes" + "encoding/json" + + "github.com/go-openapi/swag/jsonutils/adapters" + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +// WriteJSON marshals a data structure as JSON. +// +// The difference with [json.Marshal] is that it may check among several alternatives +// to do so. +// +// See [adapters.Registrar] for more details about how to configure +// multiple serialization alternatives. +// +// NOTE: to allow types that are [easyjson.Marshaler] s to use that route to process JSON, +// you now need to register the adapter for easyjson at runtime. +func WriteJSON(value any) ([]byte, error) { + if orderedMap, isOrdered := value.(ifaces.Ordered); isOrdered { + orderedMarshaler := adapters.OrderedMarshalAdapterFor(orderedMap) + + if orderedMarshaler != nil { + defer orderedMarshaler.Redeem() + + return orderedMarshaler.OrderedMarshal(orderedMap) + } + + // no support found in registered adapters, fallback to the default (unordered) case + } + + marshaler := adapters.MarshalAdapterFor(value) + if marshaler != nil { + defer marshaler.Redeem() + + return marshaler.Marshal(value) + } + + // no support found in registered adapters, fallback to the default standard library. + // + // This only happens when tinkering with the global registry of adapters, since the default handles all the above cases. + return json.Marshal(value) // Codecov ignore // this is a safeguard not easily simulated in tests +} + +// ReadJSON unmarshals JSON data into a data structure. +// +// The difference with [json.Unmarshal] is that it may check among several alternatives +// to do so. +// +// See [adapters.Registrar] for more details about how to configure +// multiple serialization alternatives. +// +// NOTE: value must be a pointer. +// +// If the provided value implements [ifaces.SetOrdered], it is a considered an "ordered map" and [ReadJSON] +// will favor an adapter that supports the [ifaces.OrderedUnmarshal] feature, or fallback to +// an unordered behavior if none is found. +// +// NOTE: to allow types that are [easyjson.Unmarshaler] s to use that route to process JSON, +// you now need to register the adapter for easyjson at runtime. +func ReadJSON(data []byte, value any) error { + trimmedData := bytes.Trim(data, "\x00") + + if orderedMap, isOrdered := value.(ifaces.SetOrdered); isOrdered { + // if the value is an ordered map, favors support for OrderedUnmarshal. 
+ + orderedUnmarshaler := adapters.OrderedUnmarshalAdapterFor(orderedMap) + + if orderedUnmarshaler != nil { + defer orderedUnmarshaler.Redeem() + + return orderedUnmarshaler.OrderedUnmarshal(trimmedData, orderedMap) + } + + // no support found in registered adapters, fallback to the default (unordered) case + } + + unmarshaler := adapters.UnmarshalAdapterFor(value) + if unmarshaler != nil { + defer unmarshaler.Redeem() + + return unmarshaler.Unmarshal(trimmedData, value) + } + + // no support found in registered adapters, fallback to the default standard library. + // + // This only happens when tinkering with the global registry of adapters, since the default handles all the above cases. + return json.Unmarshal(trimmedData, value) // Codecov ignore // this is a safeguard not easily simulated in tests +} + +// FromDynamicJSON turns a go value into a properly JSON typed structure. +// +// "Dynamic JSON" refers to what you get when unmarshaling JSON into an untyped any, +// i.e. objects are represented by map[string]any, arrays by []any, and +// all numbers are represented as float64. +// +// NOTE: target must be a pointer. +// +// # Maintaining the order of keys in objects +// +// If source and target implement [ifaces.Ordered] and [ifaces.SetOrdered] respectively, +// they are considered "ordered maps" and the order of keys is maintained in the +// "jsonification" process. In that case, map[string]any values are replaced by (ordered) [JSONMapSlice] ones. +func FromDynamicJSON(source, target any) error { + b, err := WriteJSON(source) + if err != nil { + return err + } + + return ReadJSON(b, target) +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/ordered_map.go b/vendor/github.com/go-openapi/swag/jsonutils/ordered_map.go new file mode 100644 index 0000000000..931ce2559d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/ordered_map.go @@ -0,0 +1,125 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonutils + +import ( + "iter" + + "github.com/go-openapi/swag/jsonutils/adapters" + "github.com/go-openapi/swag/typeutils" +) + +// JSONMapSlice represents a JSON object, with the order of keys maintained. +// +// It behaves like an ordered map, but keys can't be accessed in constant time. +type JSONMapSlice []JSONMapItem + +// OrderedItems iterates over all (key,value) pairs with the order of keys maintained. +// +// This implements the [ifaces.Ordered] interface, so that [ifaces.Adapter] s know how to marshal +// keys in the desired order. +func (s JSONMapSlice) OrderedItems() iter.Seq2[string, any] { + return func(yield func(string, any) bool) { + for _, item := range s { + if !yield(item.Key, item.Value) { + return + } + } + } +} + +// SetOrderedItems sets keys in the [JSONMapSlice] objects, as presented by +// the provided iterator. +// +// As a special case, if items is nil, this sets to receiver to a nil slice. 
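With the default adapter registry, WriteJSON, ReadJSON and FromDynamicJSON behave like encoding/json; the adapter indirection only matters once an alternative (e.g. easyjson) is registered. A small sketch, not part of the vendored change; the `pet` type is purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag/jsonutils"
)

type pet struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

func main() {
	// round-trip through the adapter-backed helpers
	blob, err := jsonutils.WriteJSON(pet{Name: "rex", Age: 3})
	if err != nil {
		panic(err)
	}

	var back pet
	if err := jsonutils.ReadJSON(blob, &back); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", back) // {Name:rex Age:3}

	// FromDynamicJSON re-types a value into the "dynamic JSON" shape:
	// objects become map[string]any, numbers become float64
	var dynamic any
	if err := jsonutils.FromDynamicJSON(pet{Name: "rex", Age: 3}, &dynamic); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", dynamic) // a map[string]any with "name" and "age" keys
}
```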
+// +// This implements the [ifaces.SetOrdered] interface, so that [ifaces.Adapter] s know how to unmarshal +// keys in the desired order. +func (s *JSONMapSlice) SetOrderedItems(items iter.Seq2[string, any]) { + if items == nil { + // force receiver to be a nil slice + *s = nil + + return + } + + m := *s + if len(m) > 0 { + // update mode: short-circuited when unmarshaling fresh data structures + idx := make(map[string]int, len(m)) + + for i, item := range m { + idx[item.Key] = i + } + + for k, v := range items { + idx, ok := idx[k] + if ok { + m[idx].Value = v + + continue + } + + m = append(m, JSONMapItem{Key: k, Value: v}) + } + + *s = m + + return + } + + for k, v := range items { + m = append(m, JSONMapItem{Key: k, Value: v}) + } + + *s = m +} + +// MarshalJSON renders a [JSONMapSlice] as JSON bytes, preserving the order of keys. +// +// It will pick the JSON library currently configured by the [adapters.Registry] (defaults to the standard library). +func (s JSONMapSlice) MarshalJSON() ([]byte, error) { + orderedMarshaler := adapters.OrderedMarshalAdapterFor(s) + defer orderedMarshaler.Redeem() + + return orderedMarshaler.OrderedMarshal(s) +} + +// UnmarshalJSON builds a [JSONMapSlice] from JSON bytes, preserving the order of keys. +// +// Inner objects are unmarshaled as ordered [JSONMapSlice] slices and not map[string]any. +// +// It will pick the JSON library currently configured by the [adapters.Registry] (defaults to the standard library). +func (s *JSONMapSlice) UnmarshalJSON(data []byte) error { + if typeutils.IsNil(*s) { + // allow to unmarshal with a simple var declaration (nil slice) + *s = JSONMapSlice{} + } + + orderedUnmarshaler := adapters.OrderedUnmarshalAdapterFor(s) + defer orderedUnmarshaler.Redeem() + + return orderedUnmarshaler.OrderedUnmarshal(data, s) +} + +// JSONMapItem represents the value of a key in a JSON object held by [JSONMapSlice]. +// +// Notice that JSONMapItem should not be marshaled to or unmarshaled from JSON directly. +// +// Use this type as part of a [JSONMapSlice] when dealing with JSON bytes. +type JSONMapItem struct { + Key string + Value any +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils_iface.go b/vendor/github.com/go-openapi/swag/jsonutils_iface.go new file mode 100644 index 0000000000..63e23f0b69 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils_iface.go @@ -0,0 +1,76 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "log" + + "github.com/go-openapi/swag/jsonutils" +) + +// JSONMapSlice represents a JSON object, with the order of keys maintained +// +// Deprecated: use [jsonutils.JSONMapSlice] instead, or [yamlutils.YAMLMapSlice] if you marshal YAML. +type JSONMapSlice = jsonutils.JSONMapSlice + +// JSONMapItem represents a JSON object, with the order of keys maintained +// +// Deprecated: use [jsonutils.JSONMapItem] instead. +type JSONMapItem = jsonutils.JSONMapItem + +// WriteJSON writes json data. 
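Because JSONMapSlice implements json.Marshaler and json.Unmarshaler (above), it can be dropped in wherever key order must survive a decode/encode round trip. An illustrative sketch, not part of the vendored change:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/swag/jsonutils"
)

func main() {
	doc := []byte(`{"zebra":1,"apple":2,"mango":3}`)

	// a plain map[string]any would lose the document order; JSONMapSlice keeps it
	var ordered jsonutils.JSONMapSlice
	if err := json.Unmarshal(doc, &ordered); err != nil {
		panic(err)
	}

	for key, value := range ordered.OrderedItems() {
		fmt.Println(key, value) // zebra 1, apple 2, mango 3 -- in document order
	}

	out, err := json.Marshal(ordered)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out) // {"zebra":1,"apple":2,"mango":3}
}
```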
+// +// Deprecated: use [jsonutils.WriteJSON] instead. +func WriteJSON(data interface{}) ([]byte, error) { return jsonutils.WriteJSON(data) } + +// ReadJSON reads json data. +// +// Deprecated: use [jsonutils.ReadJSON] instead. +func ReadJSON(data []byte, value interface{}) error { return jsonutils.ReadJSON(data, value) } + +// DynamicJSONToStruct converts an untyped JSON structure into a target data type. +// +// Deprecated: use [jsonutils.FromDynamicJSON] instead. +func DynamicJSONToStruct(data interface{}, target interface{}) error { + return jsonutils.FromDynamicJSON(data, target) +} + +// ConcatJSON concatenates multiple JSON objects efficiently. +// +// Deprecated: use [jsonutils.ConcatJSON] instead. +func ConcatJSON(blobs ...[]byte) []byte { return jsonutils.ConcatJSON(blobs...) } + +// ToDynamicJSON turns a go value into a properly JSON untyped structure. +// +// It is the same as [FromDynamicJSON], but doesn't check for errors. +// +// Deprecated: this function is a misnomer and is unsafe. Use [jsonutils.FromDynamicJSON] instead. +func ToDynamicJSON(value interface{}) interface{} { + var res interface{} + if err := FromDynamicJSON(value, &res); err != nil { + log.Println(err) + } + + return res +} + +// FromDynamicJSON turns a go value into a properly JSON typed structure. +// +// "Dynamic JSON" refers to what you get when unmarshaling JSON into an untyped interface{}, +// i.e. objects are represented by map[string]interface{}, arrays by []interface{}, and all +// scalar values are interface{}. +// +// Deprecated: use [jsonutils.FromDynamicJSON] instead. +func FromDynamicJSON(data, target interface{}) error { return jsonutils.FromDynamicJSON(data, target) } diff --git a/vendor/github.com/go-openapi/swag/loading/LICENSE b/vendor/github.com/go-openapi/swag/loading/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/loading/doc.go b/vendor/github.com/go-openapi/swag/loading/doc.go new file mode 100644 index 0000000000..62585615e1 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package loading provides tools to load a file from http or from a local file system. 
+package loading diff --git a/vendor/github.com/go-openapi/swag/loading/errors.go b/vendor/github.com/go-openapi/swag/loading/errors.go new file mode 100644 index 0000000000..ca45732a7a --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/errors.go @@ -0,0 +1,26 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loading + +type loadingError string + +const ( + // ErrLoader is an error raised by the file loader utility + ErrLoader loadingError = "loader error" +) + +func (e loadingError) Error() string { + return string(e) +} diff --git a/vendor/github.com/go-openapi/swag/loading/json.go b/vendor/github.com/go-openapi/swag/loading/json.go new file mode 100644 index 0000000000..aadf999135 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/json.go @@ -0,0 +1,22 @@ +package loading + +import ( + "encoding/json" + "errors" + "path/filepath" +) + +// JSONMatcher matches json for a file loader. +func JSONMatcher(path string) bool { + ext := filepath.Ext(path) + return ext == ".json" || ext == ".jsn" || ext == ".jso" +} + +// JSONDoc loads a json document from either a file or a remote url. +func JSONDoc(path string, opts ...Option) (json.RawMessage, error) { + data, err := LoadFromFileOrHTTP(path, opts...) + if err != nil { + return nil, errors.Join(err, ErrLoader) + } + return json.RawMessage(data), nil +} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading/loading.go similarity index 69% rename from vendor/github.com/go-openapi/swag/loading.go rename to vendor/github.com/go-openapi/swag/loading/loading.go index 783442fddf..bd955535f7 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading/loading.go @@ -12,43 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. 
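ErrLoader is a sentinel joined into (or wrapped by) loader failures, so callers can classify errors with errors.Is. A brief sketch, not part of the vendored change; the file path is a placeholder:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/go-openapi/swag/loading"
)

func main() {
	fmt.Println(loading.JSONMatcher("./swagger.json")) // true

	// the same call accepts a local path or an http(s) URL
	doc, err := loading.JSONDoc("./swagger.json")
	if err != nil {
		// loader failures carry the ErrLoader sentinel
		if errors.Is(err, loading.ErrLoader) {
			log.Fatalf("could not load document: %v", err)
		}
		log.Fatal(err)
	}
	fmt.Printf("loaded %d bytes\n", len(doc))
}
```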
-package swag +package loading import ( + "context" + "embed" "fmt" "io" "log" "net/http" "net/url" - "os" "path" "path/filepath" "runtime" "strings" - "time" ) -// LoadHTTPTimeout the default timeout for load requests -var LoadHTTPTimeout = 30 * time.Second - -// LoadHTTPBasicAuthUsername the username to use when load requests require basic auth -var LoadHTTPBasicAuthUsername = "" - -// LoadHTTPBasicAuthPassword the password to use when load requests require basic auth -var LoadHTTPBasicAuthPassword = "" - -// LoadHTTPCustomHeaders an optional collection of custom HTTP headers for load requests -var LoadHTTPCustomHeaders = map[string]string{} - // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(pth string) ([]byte, error) { - return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth) -} - -// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in -// timeout arg allows for per request overriding of the request timeout -func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth) +func LoadFromFileOrHTTP(pth string, opts ...Option) ([]byte, error) { + o := optionsWithDefaults(opts) + return LoadStrategy(pth, o.ReadFileFunc(), loadHTTPBytes(opts...), opts...)(pth) } // LoadStrategy returns a loader function for a given path or URI. @@ -81,10 +64,12 @@ func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, e // - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported) // - `file:///c:/folder/file` becomes `C:\folder\file` // - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file` -func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { +func LoadStrategy(pth string, local, remote func(string) ([]byte, error), opts ...Option) func(string) ([]byte, error) { if strings.HasPrefix(pth, "http") { return remote } + o := optionsWithDefaults(opts) + _, isEmbedFS := o.fs.(embed.FS) return func(p string) ([]byte, error) { upth, err := url.PathUnescape(p) @@ -92,19 +77,19 @@ func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(s return nil, err } - if !strings.HasPrefix(p, `file://`) { + cpth, hasPrefix := strings.CutPrefix(upth, "file://") + if !hasPrefix || isEmbedFS || runtime.GOOS != "windows" { + // crude processing: trim the file:// prefix. This leaves full URIs with a host with a (mostly) unexpected result // regular file path provided: just normalize slashes - return local(filepath.FromSlash(upth)) - } - - if runtime.GOOS != "windows" { - // crude processing: this leaves full URIs with a host with a (mostly) unexpected result - upth = strings.TrimPrefix(upth, `file://`) + if isEmbedFS { + // on windows, we need to slash the path if FS is an embed FS. + return local(strings.TrimLeft(filepath.ToSlash(cpth), "./")) // remove invalid leading characters for embed FS + } - return local(filepath.FromSlash(upth)) + return local(filepath.FromSlash(cpth)) } - // windows-only pre-processing of file://... URIs + // windows-only pre-processing of file://... URIs, excluding embed.FS // support for canonical file URIs on windows. 
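The package-level knobs of the old swag loader are replaced here by per-call functional options (defined in options.go further down in this diff). A usage sketch, not part of the vendored change; the URL and path are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/go-openapi/swag/loading"
)

func main() {
	// remote document: fetched over HTTP with a per-call timeout,
	// optional basic auth and custom headers
	buf, err := loading.LoadFromFileOrHTTP(
		"https://example.com/openapi.json",
		loading.WithTimeout(10*time.Second),
		loading.WithBasicAuth("user", "secret"),
		loading.WithCustomHeaders(map[string]string{"X-Trace": "demo"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fetched %d bytes\n", len(buf))

	// a path without the "http" prefix goes through the local file loader instead
	buf, err = loading.LoadFromFileOrHTTP("./openapi.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes\n", len(buf))
}
```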
u, err := url.Parse(filepath.ToSlash(upth)) @@ -139,19 +124,29 @@ func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(s } } -func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { +func loadHTTPBytes(opts ...Option) func(path string) ([]byte, error) { + o := optionsWithDefaults(opts) + return func(path string) ([]byte, error) { - client := &http.Client{Timeout: timeout} - req, err := http.NewRequest(http.MethodGet, path, nil) //nolint:noctx + client := o.client + timeoutCtx := context.Background() + var cancel func() + + if o.httpTimeout > 0 { + timeoutCtx, cancel = context.WithTimeout(timeoutCtx, o.httpTimeout) + defer cancel() + } + + req, err := http.NewRequestWithContext(timeoutCtx, http.MethodGet, path, nil) if err != nil { return nil, err } - if LoadHTTPBasicAuthUsername != "" && LoadHTTPBasicAuthPassword != "" { - req.SetBasicAuth(LoadHTTPBasicAuthUsername, LoadHTTPBasicAuthPassword) + if o.basicAuthUsername != "" && o.basicAuthPassword != "" { + req.SetBasicAuth(o.basicAuthUsername, o.basicAuthPassword) } - for key, val := range LoadHTTPCustomHeaders { + for key, val := range o.customHeaders { req.Header.Set(key, val) } @@ -168,7 +163,7 @@ func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { } if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) + return nil, fmt.Errorf("could not access document at %q [%s]: %w", path, resp.Status, ErrLoader) } return io.ReadAll(resp.Body) diff --git a/vendor/github.com/go-openapi/swag/loading/options.go b/vendor/github.com/go-openapi/swag/loading/options.go new file mode 100644 index 0000000000..a51329e938 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/options.go @@ -0,0 +1,136 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loading + +import ( + "io/fs" + "net/http" + "os" + "time" +) + +type ( + // Option provides options for loading a file over HTTP or from a file. + Option func(*options) + + httpOptions struct { + httpTimeout time.Duration + basicAuthUsername string + basicAuthPassword string + customHeaders map[string]string + client *http.Client + } + + fileOptions struct { + fs fs.ReadFileFS + } + + options struct { + httpOptions + fileOptions + } +) + +func (fo fileOptions) ReadFileFunc() func(string) ([]byte, error) { + if fo.fs == nil { + return os.ReadFile + } + + return fo.fs.ReadFile +} + +// WithTimeout sets a timeout for the remote file loader. +// +// The default timeout is 30s. +func WithTimeout(timeout time.Duration) Option { + return func(o *options) { + o.httpTimeout = timeout + } +} + +// WithBasicAuth sets a basic authentication scheme for the remote file loader. +func WithBasicAuth(username, password string) Option { + return func(o *options) { + o.basicAuthUsername = username + o.basicAuthPassword = password + } +} + +// WithCustomHeaders sets custom headers for the remote file loader. 
+func WithCustomHeaders(headers map[string]string) Option { + return func(o *options) { + if o.customHeaders == nil { + o.customHeaders = make(map[string]string, len(headers)) + } + + for header, value := range headers { + o.customHeaders[header] = value + } + } +} + +// WithHTTClient overrides the default HTTP client used to fetch a remote file. +// +// By default, [http.DefaultClient] is used. +func WithHTTPClient(client *http.Client) Option { + return func(o *options) { + o.client = client + } +} + +// WithFileFS sets a file system for the local file loader. +// +// If the provided file system is a [fs.ReadFileFS], the ReadFile function is used. +// Otherwise, ReadFile is wrapped using [fs.ReadFile]. +// +// By default, the file system is the one provided by the os package. +// +// For example, this may be set to consume from an embedded file system, or a rooted FS. +func WithFS(filesystem fs.FS) Option { + return func(o *options) { + if rfs, ok := filesystem.(fs.ReadFileFS); ok { + o.fs = rfs + + return + } + o.fs = readFileFS{FS: filesystem} + } +} + +type readFileFS struct { + fs.FS +} + +func (r readFileFS) ReadFile(name string) ([]byte, error) { + return fs.ReadFile(r.FS, name) +} + +func optionsWithDefaults(opts []Option) options { + const defaultTimeout = 30 * time.Second + + o := options{ + // package level defaults + httpOptions: httpOptions{ + httpTimeout: defaultTimeout, + client: http.DefaultClient, + }, + } + + for _, apply := range opts { + apply(&o) + } + + return o +} diff --git a/vendor/github.com/go-openapi/swag/loading/yaml.go b/vendor/github.com/go-openapi/swag/loading/yaml.go new file mode 100644 index 0000000000..40bd2a769f --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/yaml.go @@ -0,0 +1,48 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loading + +import ( + "encoding/json" + "path/filepath" + + "github.com/go-openapi/swag/yamlutils" +) + +// YAMLMatcher matches yaml for a file loader. +func YAMLMatcher(path string) bool { + ext := filepath.Ext(path) + return ext == ".yaml" || ext == ".yml" +} + +// YAMLDoc loads a yaml document from either http or a file and converts it to json. +func YAMLDoc(path string, opts ...Option) (json.RawMessage, error) { + yamlDoc, err := YAMLData(path, opts...) + if err != nil { + return nil, err + } + + return yamlutils.YAMLToJSON(yamlDoc) +} + +// YAMLData loads a yaml document from either http or a file. +func YAMLData(path string, opts ...Option) (interface{}, error) { + data, err := LoadFromFileOrHTTP(path, opts...) 
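WithFS is what makes the embed.FS branch in LoadStrategy reachable: pointing the local loader at an embedded file system keeps documents bundled with the binary. A sketch, not part of the vendored change; the embedded path is hypothetical:

```go
package main

import (
	"embed"
	"fmt"
	"log"

	"github.com/go-openapi/swag/loading"
)

//go:embed specs/petstore.yaml
var specFS embed.FS

func main() {
	// embed.FS already implements fs.ReadFileFS, so WithFS uses its ReadFile directly;
	// LoadStrategy slashes the path and trims a leading "./" for embedded file systems
	doc, err := loading.YAMLDoc("specs/petstore.yaml", loading.WithFS(specFS))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", doc) // the YAML document converted to a JSON raw message
}
```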
+ if err != nil { + return nil, err + } + + return yamlutils.BytesToYAMLDoc(data) +} diff --git a/vendor/github.com/go-openapi/swag/loading_iface.go b/vendor/github.com/go-openapi/swag/loading_iface.go new file mode 100644 index 0000000000..38d825bc5e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading_iface.go @@ -0,0 +1,102 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "encoding/json" + "time" + + "github.com/go-openapi/swag/loading" +) + +var ( + // Package-level defaults for the file loading utilities (deprecated). + + // LoadHTTPTimeout the default timeout for load requests. + // + // Deprecated: use [loading.WithTimeout] instead. + LoadHTTPTimeout = 30 * time.Second + + // LoadHTTPBasicAuthUsername the username to use when load requests require basic auth. + // + // Deprecated: use [loading.WithBasicAuth] instead. + LoadHTTPBasicAuthUsername = "" + + // LoadHTTPBasicAuthPassword the password to use when load requests require basic auth. + // + // Deprecated: use [loading.WithBasicAuth] instead. + LoadHTTPBasicAuthPassword = "" + + // LoadHTTPCustomHeaders an optional collection of custom HTTP headers for load requests. + // + // Deprecated: use [loading.WithCustomHeaders] instead. + LoadHTTPCustomHeaders = map[string]string{} +) + +// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the provided path. +// +// Deprecated: use [loading.LoadFromFileOrHTTP] instead. +func LoadFromFileOrHTTP(pth string, opts ...loading.Option) ([]byte, error) { + return loading.LoadFromFileOrHTTP(pth, loadingOptionsWithDefaults(opts)...) +} + +// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in +// timeout arg allows for per request overriding of the request timeout. +// +// Deprecated: use [loading.LoadFileOrHTTP] with the [loading.WithTimeout] option instead. +func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration, opts ...loading.Option) ([]byte, error) { + opts = append(opts, loading.WithTimeout(timeout)) + + return LoadFromFileOrHTTP(pth, opts...) +} + +// LoadStrategy returns a loader function for a given path or URL. +// +// Deprecated: use [loading.LoadStrategy] instead. +func LoadStrategy(pth string, local, remote func(string) ([]byte, error), opts ...loading.Option) func(string) ([]byte, error) { + return loading.LoadStrategy(pth, local, remote, loadingOptionsWithDefaults(opts)...) +} + +// YAMLMatcher matches yaml for a file loader. +// +// Deprecated: use [loading.YAMLMatcher] instead. +func YAMLMatcher(path string) bool { return loading.YAMLMatcher(path) } + +// YAMLDoc loads a yaml document from either http or a file and converts it to json. +// +// Deprecated: use [loading.YAMLDoc] instead. +func YAMLDoc(path string) (json.RawMessage, error) { + return loading.YAMLDoc(path) +} + +// YAMLData loads a yaml document from either http or a file. +// +// Deprecated: use [loading.YAMLData] instead. 
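The deprecated wrappers above bridge the old package-level defaults onto the new option-based API, so existing callers keep working while new code passes options explicitly. A side-by-side sketch, not part of the vendored change; URL and credentials are placeholders:

```go
package main

import (
	"log"
	"time"

	"github.com/go-openapi/swag"
	"github.com/go-openapi/swag/loading"
)

func main() {
	// deprecated style: mutable package-level defaults plus a per-call timeout
	swag.LoadHTTPBasicAuthUsername = "user"
	swag.LoadHTTPBasicAuthPassword = "secret"
	if _, err := swag.LoadFromFileOrHTTPWithTimeout("https://example.com/spec.json", 5*time.Second); err != nil {
		log.Println(err)
	}

	// equivalent call against the new loading package: no shared mutable state
	if _, err := loading.LoadFromFileOrHTTP(
		"https://example.com/spec.json",
		loading.WithTimeout(5*time.Second),
		loading.WithBasicAuth("user", "secret"),
	); err != nil {
		log.Println(err)
	}
}
```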
+func YAMLData(path string) (interface{}, error) { + return loading.YAMLData(path) +} + +// loadingOptionsWithDefaults bridges deprecated default settings that use package-level variables, +// with the recommended use of loading.Option. +func loadingOptionsWithDefaults(opts []loading.Option) []loading.Option { + o := []loading.Option{ + loading.WithTimeout(LoadHTTPTimeout), + loading.WithBasicAuth(LoadHTTPBasicAuthUsername, LoadHTTPBasicAuthPassword), + loading.WithCustomHeaders(LoadHTTPCustomHeaders), + } + o = append(o, opts...) + + return o +} diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md similarity index 53% rename from vendor/github.com/go-openapi/swag/BENCHMARK.md rename to vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md index e7f28ed6b7..6674c63b72 100644 --- a/vendor/github.com/go-openapi/swag/BENCHMARK.md +++ b/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md @@ -1,12 +1,10 @@ -# Benchmarks - -## Name mangling utilities +# Benchmarking name mangling utilities ```bash go test -bench XXX -run XXX -benchtime 30s ``` -### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df +## Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df ``` goos: linux @@ -21,7 +19,7 @@ BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op ``` -### Benchmarks after PR #79 +## Benchmarks after PR #79 ~ x10 performance improvement and ~ /100 memory allocations. @@ -50,3 +48,43 @@ BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op ``` + +## Benchmarks at d7d2d1b895f5b6747afaff312dd2a402e69e818b + +go1.24 + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 19757858 1881 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 17494111 2094 ns/op 74 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 28161226 1492 ns/op 158 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 23787333 1489 ns/op 158 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 17537257 2030 ns/op 103 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 16977453 2156 ns/op 105 B/op 6 allocs/op +``` + +## Benchmarks after PR #106 + +Moving the scope of everything down to a struct allowed to reduce a bit garbage and pooling. + +On top of that, ToGoName (and thus ToVarName) have been subject to a minor optimization, removing a few allocations. + +Overall timings improve by ~ -10%. 
+ +go1.24 + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag/mangling +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 22496130 1618 ns/op 31 B/op 3 allocs/op +BenchmarkToXXXName/ToVarName-16 22538068 1618 ns/op 33 B/op 3 allocs/op +BenchmarkToXXXName/ToFileName-16 27722977 1236 ns/op 105 B/op 6 allocs/op +BenchmarkToXXXName/ToCommandName-16 27967395 1258 ns/op 105 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18587901 1917 ns/op 103 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17193208 2019 ns/op 108 B/op 7 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/mangling/LICENSE b/vendor/github.com/go-openapi/swag/mangling/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/mangling/doc.go b/vendor/github.com/go-openapi/swag/mangling/doc.go new file mode 100644 index 0000000000..dbe806828c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/doc.go @@ -0,0 +1,36 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package mangling provides name mangling capabilities. +// +// Name mangling is an important stage when generating code: +// it helps construct safe program identifiers that abide by the language rules +// and play along with linters. +// +// Examples: +// +// Suppose we get an object name taken from an API spec: "json_object", +// +// We may generate a legit go type name using [NameMangler.ToGoName]: "JsonObject". +// +// We may then locate this type in a source file named using [NameMangler.ToFileName]: "json_object.go". +// +// The methods exposed by the NameMangler are used to generate code in many different contexts, such as: +// +// - generating exported or unexported go identifiers from a JSON schema or an API spec +// - generating file names +// - generating human-readable comments for types and variables +// - generating JSON-like API identifiers from go code +// - ... +package mangling diff --git a/vendor/github.com/go-openapi/swag/mangling/initialism_index.go b/vendor/github.com/go-openapi/swag/mangling/initialism_index.go new file mode 100644 index 0000000000..cf0786f812 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/initialism_index.go @@ -0,0 +1,268 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mangling + +import ( + "sort" + "strings" + "unicode" + "unicode/utf8" +) + +// DefaultInitialisms returns all the initialisms configured by default for this package. +// +// # Motivation +// +// Common initialisms are acronyms for which the ordinary camel-casing rules are altered and +// for which we retain the original case. +// +// This is largely specific to the go naming conventions enforced by golint (now revive). +// +// # Example +// +// In go, "id" is a good-looking identifier, but "Id" is not and "ID" is preferred +// (notice that this stems only from conventions: the go compiler accepts all of these). +// +// Similarly, we may use "http", but not "Http". In this case, "HTTP" is preferred. +// +// # Reference and customization +// +// The default list of these casing-style exceptions is taken from the [github.com/mgechev/revive] linter for go: +// https://github.com/mgechev/revive/blob/master/lint/name.go#L93 +// +// There are a few additions to the original list, such as IPv4, IPv6 and OAI ("OpenAPI"). +// +// For these additions, "IPv4" would be preferred to "Ipv4" or "IPV4", and "OAI" to "Oai" +// +// You may redefine this list entirely using the mangler option [WithInitialisms], or simply add extra definitions +// using [WithAdditionalInitialisms]. +// +// # Mixed-case and plurals +// +// Notice that initialisms are not necessarily fully upper-cased: a mixed-case initialism indicates the preferred casing. +// +// Obviously, lower-case only initialisms do not make a lot of sense: if lower-case only initialisms are added, +// they will be considered fully capitalized. +// +// Plural forms use mixed case like "IDs". And so do values like "IPv4" or "IPv6". +// +// The [NameMangler] automatically detects simple plurals for words such as "IDs" or "APIs", +// so you don't need to configure these variants. +// +// At this moment, it doesn't support pluralization of terms that ends with an 's' (or 'S'), since there is +// no clear consensus on whether a word like DNS should be pluralized as DNSes or remain invariant. +// The [NameMangler] consider those invariant. Therefore DNSs or DNSes are not recognized as plurals for DNS. +// +// Besids, we don't want to support pluralization of terms which would otherwise conflict with another one, +// like "HTTPs" vs "HTTPS". All these should be considered invariant. Hence: "Https" matches "HTTPS" and +// "HTTPSS" is "HTTPS" followed by "S". 
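The initialism rules above are what keep names such as ID, HTTP or IPv4 in their canonical casing when identifiers are mangled. The sketch below uses the long-standing top-level swag helpers (ToGoName, ToFileName, ToCommandName), which are not shown in this excerpt but are kept by the parent package as thin wrappers over the mangling package; the outputs in the comments are indicative and follow from the rules described above:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// "id" is an initialism: it is fully capitalized rather than title-cased
	fmt.Println(swag.ToGoName("find thing by id")) // expected: FindThingByID

	// "http" keeps its canonical upper-case form
	fmt.Println(swag.ToGoName("http_server")) // expected: HTTPServer

	// and back to file / CLI friendly names
	fmt.Println(swag.ToFileName("FindThingByID"))    // expected: find_thing_by_id
	fmt.Println(swag.ToCommandName("FindThingByID")) // expected: find-thing-by-id
}
```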
+func DefaultInitialisms() []string { + return []string{ + "ACL", + "API", + "ASCII", + "CPU", + "CSS", + "DNS", + "EOF", + "GUID", + "HTML", + "HTTPS", + "HTTP", + "ID", + "IP", + "IPv4", // prefer the mixed case outcome IPv4 over the capitalized IPV4 + "IPv6", // prefer the mixed case outcome IPv6 over the capitalized IPV6 + "JSON", + "LHS", + "OAI", + "QPS", + "RAM", + "RHS", + "RPC", + "SLA", + "SMTP", + "SQL", + "SSH", + "TCP", + "TLS", + "TTL", + "UDP", + "UI", + "UID", + "UUID", + "URI", + "URL", + "UTF8", + "VM", + "XML", + "XMPP", + "XSRF", + "XSS", + } +} + +type indexOfInitialisms struct { + initialismsCache + + index map[string]struct{} +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + index: make(map[string]struct{}), + } +} + +func (m *indexOfInitialisms) add(words ...string) *indexOfInitialisms { + for _, word := range words { + // sanitization of injected words: trimmed from blanks, and must start with a letter + trimmed := strings.TrimSpace(word) + + firstRune, _ := utf8.DecodeRuneInString(trimmed) + if !unicode.IsLetter(firstRune) { + continue + } + + // Initialisms are case-sensitive. This means that we support mixed-case words. + // However, if specified as a lower-case string, the initialism should be fully capitalized. + if trimmed == strings.ToLower(trimmed) { + m.index[strings.ToUpper(trimmed)] = struct{}{} + + continue + } + + m.index[trimmed] = struct{}{} + } + return m +} + +func (m *indexOfInitialisms) sorted() []string { + result := make([]string, 0, len(m.index)) + for k := range m.index { + result = append(result, k) + } + sort.Sort(sort.Reverse(byInitialism(result))) + return result +} + +func (m *indexOfInitialisms) buildCache() { + m.build(m.sorted(), m.pluralForm) +} + +// initialismsCache caches all needed pre-computed and converted initialism entries, +// in the desired resolution order. +type initialismsCache struct { + initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version + initialismsPluralForm []pluralForm +} + +func (c *initialismsCache) build(in []string, pluralfunc func(string) pluralForm) { + c.initialisms = in + c.initialismsRunes = asRunes(c.initialisms) + c.initialismsUpperCased = asUpperCased(c.initialisms) + c.initialismsPluralForm = asPluralForms(c.initialisms, pluralfunc) +} + +// pluralForm denotes the kind of pluralization to be used for initialisms. +// +// At this moment, initialisms are either invariant or follow a simple plural form with an +// extra (lower case) "s". +type pluralForm uint8 + +const ( + notPlural pluralForm = iota + invariantPlural + simplePlural +) + +// pluralForm indicates how we want to pluralize a given initialism. +// +// Besides configured invariant forms (like HTTP and HTTPS), +// an initialism is normally pluralized by adding a single 's', like in IDs. +// +// Initialisms ending with an 'S' or an 's' are configured as invariant (we don't +// support plural forms like CSSes or DNSes, however the mechanism could be extended to +// do just that). 
+func (m *indexOfInitialisms) pluralForm(key string) pluralForm { + if _, ok := m.index[key]; !ok { + return notPlural + } + + if strings.HasSuffix(strings.ToUpper(key), "S") { + return invariantPlural + } + + if _, ok := m.index[key+"s"]; ok { + return invariantPlural + } + + if _, ok := m.index[key+"S"]; ok { + return invariantPlural + } + + return simplePlural +} + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less specifies the order in which initialisms are prioritized: +// 1. match longest first +// 2. when equal length, match in reverse lexicographical order, lower case match comes first +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return s[i] < s[j] +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +// asPluralForms bakes an index of pluralization support. +func asPluralForms(in []string, pluralFunc func(string) pluralForm) []pluralForm { + out := make([]pluralForm, len(in)) + for i, initialism := range in { + out[i] = pluralFunc(initialism) + } + + return out +} diff --git a/vendor/github.com/go-openapi/swag/mangling/name_lexem.go b/vendor/github.com/go-openapi/swag/mangling/name_lexem.go new file mode 100644 index 0000000000..02004b4f3a --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/name_lexem.go @@ -0,0 +1,197 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mangling + +import ( + "bytes" + "strings" + "unicode" + "unicode/utf8" +) + +type ( + lexemKind uint8 + + nameLexem struct { + original string + matchedInitialism string + kind lexemKind + } +) + +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName +) + +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, + original: original, + matchedInitialism: matchedInitialism, + } +} + +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, + original: trim(original), // TODO: save on calls to trim + } +} + +// WriteTitleized writes the titleized lexeme to a bytes.Buffer. +// +// If the first letter cannot be capitalized, it doesn't write anything and return false, +// so the caller may attempt some workaround strategy. 
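The resolution order implemented by byInitialism above (longest initialism first, then reverse lexicographic) can be restated with a plain sort.Slice. This is a sketch for illustration only (requires "sort"):

    words := []string{"HTTP", "ID", "HTTPS", "UUID"}
    sort.Slice(words, func(i, j int) bool {
        if len(words[i]) != len(words[j]) {
            return len(words[i]) > len(words[j]) // longest first
        }
        return words[i] > words[j] // equal length: reverse lexicographic order
    })
    // words is now [HTTPS UUID HTTP ID], so a candidate match against "HTTPS"
    // is tried before the shorter "HTTP".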
+func (l nameLexem) WriteTitleized(w *bytes.Buffer, alwaysUpper bool) bool { + if l.kind == lexemKindInitialismName { + w.WriteString(l.matchedInitialism) + + return true + } + + if len(l.original) == 0 { + return true + } + + if len(l.original) == 1 { + // identifier is too short: casing will depend on the context + firstByte := l.original[0] + switch { + case 'A' <= firstByte && firstByte <= 'Z': + // safe + w.WriteByte(firstByte) + + return true + case alwaysUpper && 'a' <= firstByte && firstByte <= 'z': + w.WriteByte(firstByte - 'a' + 'A') + + return true + default: + + // not a letter: skip and let the caller decide + return false + } + } + + if firstByte := l.original[0]; firstByte < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= firstByte && firstByte <= 'Z': + // already an upper case letter + w.WriteString(l.original) + + return true + case 'a' <= firstByte && firstByte <= 'z': + w.WriteByte(firstByte - 'a' + 'A') + w.WriteString(l.original[1:]) + + return true + default: + // not a good candidate: doesn't start with a letter + return false + } + } + + // unicode + firstRune, idx := utf8.DecodeRuneInString(l.original) + if !unicode.IsLetter(firstRune) || !unicode.IsUpper(unicode.ToUpper(firstRune)) { + // not a good candidate: doesn't start with a letter + // or a rune for which case doesn't make sense (e.g. East-Asian runes etc) + return false + } + + rest := l.original[idx:] + w.WriteRune(unicode.ToUpper(firstRune)) + w.WriteString(strings.ToLower(rest)) + + return true +} + +// WriteLower is like write titleized but it writes a lower-case version of the lexeme. +// +// Similarly, there is no writing if the casing of the first rune doesn't make sense. +func (l nameLexem) WriteLower(w *bytes.Buffer, alwaysLower bool) bool { + if l.kind == lexemKindInitialismName { + w.WriteString(lower(l.matchedInitialism)) + + return true + } + + if len(l.original) == 0 { + return true + } + + if len(l.original) == 1 { + // identifier is too short: casing will depend on the context + firstByte := l.original[0] + switch { + case 'a' <= firstByte && firstByte <= 'z': + // safe + w.WriteByte(firstByte) + + return true + case alwaysLower && 'A' <= firstByte && firstByte <= 'Z': + w.WriteByte(firstByte - 'A' + 'a') + + return true + default: + + // not a letter: skip and let the caller decide + return false + } + } + + if firstByte := l.original[0]; firstByte < utf8.RuneSelf { + // ASCII + switch { + case 'a' <= firstByte && firstByte <= 'z': + // already a lower case letter + w.WriteString(l.original) + + return true + case 'A' <= firstByte && firstByte <= 'Z': + w.WriteByte(firstByte - 'A' + 'a') + w.WriteString(l.original[1:]) + + return true + default: + // not a good candidate: doesn't start with a letter + return false + } + } + + // unicode + firstRune, idx := utf8.DecodeRuneInString(l.original) + if !unicode.IsLetter(firstRune) || !unicode.IsLower(unicode.ToLower(firstRune)) { + // not a good candidate: doesn't start with a letter + // or a rune for which case doesn't make sense (e.g. 
East-Asian runes etc) + return false + } + + rest := l.original[idx:] + w.WriteRune(unicode.ToLower(firstRune)) + w.WriteString(rest) + + return true +} + +func (l nameLexem) GetOriginal() string { + return l.original +} + +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName +} diff --git a/vendor/github.com/go-openapi/swag/mangling/name_mangler.go b/vendor/github.com/go-openapi/swag/mangling/name_mangler.go new file mode 100644 index 0000000000..94ae555a7b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/name_mangler.go @@ -0,0 +1,381 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mangling + +import ( + "strings" + "unicode" +) + +// NameMangler knows how to transform sentences or words into +// identifiers that are a better fit in contexts such as: +// +// - unexported or exported go variable identifiers +// - file names +// - camel cased identifiers +// - ... +// +// The [NameMangler] is safe for concurrent use, save for its [NameMangler.AddInitialisms] method, +// which is not. +// +// # Known limitations +// +// At this moment, the [NameMangler] doesn't play well with "all caps" text: +// +// unless every single upper-cased word is declared as an initialism, capitalized words would generally +// not be transformed with the expected result, e.g. +// +// ToFileName("THIS_IS_ALL_CAPS") +// +// yields the weird outcome +// +// "t_h_i_s_i_s_a_l_l_c_a_p_s" +type NameMangler struct { + options + + index *indexOfInitialisms + + splitter splitter + splitterWithPostSplit splitter + + _ struct{} +} + +// NewNameMangler builds a name mangler ready to convert strings. +// +// The default name mangler is configured with default common initialisms and all default options. +func NewNameMangler(opts ...Option) NameMangler { + m := NameMangler{ + options: optionsWithDefaults(opts), + index: newIndexOfInitialisms(), + } + m.addInitialisms(m.commonInitialisms...) + + // a splitter that returns matches lexemes as ready-to-assemble strings: + // details of the lexemes are redeemed. + m.splitter = newSplitter( + withInitialismsCache(&m.index.initialismsCache), + withReplaceFunc(m.replaceFunc), + ) + + // a splitter that returns matches lexemes ready for post-processing + m.splitterWithPostSplit = newSplitter( + withInitialismsCache(&m.index.initialismsCache), + withReplaceFunc(m.replaceFunc), + withPostSplitInitialismCheck, + ) + + return m +} + +// AddInitialisms declares extra initialisms to the mangler. +// +// It declares extra words as "initialisms" (i.e. words that won't be camel cased or titled cased), +// on top of the existing list of common initialisms (such as ID, HTTP...). +// +// Added words must start with a (unicode) letter. If some don't, they are ignored. +// Added words are either fully capitalized or mixed-cased. Lower-case only words are considered capitalized. +// +// It is typically used just after initializing the [NameMangler]. 
+// +// When all initialisms are known at the time the mangler is initialized, it is preferable to +// use [NewNameMangler] with the option [WithAdditionalInitialisms]. +// +// Adding initialisms mutates the mangler and should not be carried out concurrently with other calls to the mangler. +func (m *NameMangler) AddInitialisms(words ...string) { + m.addInitialisms(words...) +} + +// Initialisms renders the list of initialisms supported by this mangler. +func (m *NameMangler) Initialisms() []string { + return m.index.initialisms +} + +// Camelize a single word. +// +// Example: +// +// - "HELLO" and "hello" become "Hello". +func (m NameMangler) Camelize(word string) string { + ru := []rune(word) + + switch len(ru) { + case 0: + return "" + case 1: + return string(unicode.ToUpper(ru[0])) + default: + camelized := poolOfBuffers.BorrowBuffer(len(word)) + camelized.Grow(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + + camelized.WriteRune(unicode.ToUpper(ru[0])) + for _, ru := range ru[1:] { + camelized.WriteRune(unicode.ToLower(ru)) + } + + return camelized.String() + } +} + +// ToFileName generates a suitable snake-case file name from a sentence. +// +// It lower-cases everything with underscore (_) as a word separator. +// +// Examples: +// +// - "Hello, Swagger" becomes "hello_swagger" +// - "HelloSwagger" becomes "hello_swagger" +func (m NameMangler) ToFileName(name string) string { + inptr := m.split(name) + in := *inptr + out := make([]string, 0, len(in)) + + for _, w := range in { + out = append(out, lower(w)) + } + poolOfStrings.RedeemStrings(inptr) + + return strings.Join(out, "_") +} + +// ToCommandName generates a suitable CLI command name from a sentence. +// +// It lower-cases everything with dash (-) as a word separator. +// +// Examples: +// +// - "Hello, Swagger" becomes "hello-swagger" +// - "HelloSwagger" becomes "hello-swagger" +func (m NameMangler) ToCommandName(name string) string { + inptr := m.split(name) + in := *inptr + out := make([]string, 0, len(in)) + + for _, w := range in { + out = append(out, lower(w)) + } + poolOfStrings.RedeemStrings(inptr) + + return strings.Join(out, "-") +} + +// ToHumanNameLower represents a code name as a human-readable series of words. +// +// It lower-cases everything with blank space as a word separator. +// +// NOTE: parts recognized as initialisms just keep their original casing. +// +// Examples: +// +// - "Hello, Swagger" becomes "hello swagger" +// - "HelloSwagger" or "Hello-Swagger" become "hello swagger" +func (m NameMangler) ToHumanNameLower(name string) string { + s := m.splitterWithPostSplit + in := s.split(name) + out := make([]string, 0, len(*in)) + + for _, w := range *in { + if !w.IsInitialism() { + out = append(out, lower(w.GetOriginal())) + } else { + out = append(out, trim(w.GetOriginal())) + } + } + + poolOfLexems.RedeemLexems(in) + + return strings.Join(out, " ") +} + +// ToHumanNameTitle represents a code name as a human-readable series of titleized words. +// +// It titleizes every word with blank space as a word separator. 
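Taken together, the conversions documented above behave as follows. A usage sketch; the outputs shown are the ones stated in the doc comments:

    m := mangling.NewNameMangler()
    m.Camelize("HELLO")                 // "Hello"
    m.ToFileName("Hello, Swagger")      // "hello_swagger"
    m.ToCommandName("HelloSwagger")     // "hello-swagger"
    m.ToHumanNameLower("Hello-Swagger") // "hello swagger"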
+// +// Examples: +// +// - "hello, Swagger" becomes "Hello Swagger" +// - "helloSwagger" becomes "Hello Swagger" +func (m NameMangler) ToHumanNameTitle(name string) string { + s := m.splitterWithPostSplit + in := s.split(name) + + out := make([]string, 0, len(*in)) + for _, w := range *in { + original := trim(w.GetOriginal()) + if !w.IsInitialism() { + out = append(out, m.Camelize(original)) + } else { + out = append(out, original) + } + } + poolOfLexems.RedeemLexems(in) + + return strings.Join(out, " ") +} + +// ToJSONName generates a camelized single-word version of a sentence. +// +// The output assembles every camelized word, but for the first word, which +// is lower-cased. +// +// Example: +// +// - "Hello_swagger" becomes "helloSwagger" +func (m NameMangler) ToJSONName(name string) string { + inptr := m.split(name) + in := *inptr + out := make([]string, 0, len(in)) + + for i, w := range in { + if i == 0 { + out = append(out, lower(w)) + continue + } + out = append(out, m.Camelize(trim(w))) + } + + poolOfStrings.RedeemStrings(inptr) + + return strings.Join(out, "") +} + +// ToVarName generates a legit unexported go variable name from a sentence. +// +// The generated name plays well with linters (see also [NameMangler.ToGoName]). +// +// Examples: +// +// - "Hello_swagger" becomes "helloSwagger" +// - "Http_server" becomes "httpServer" +// +// This name applies the same rules as [NameMangler.ToGoName] (legit exported variable), save the +// capitalization of the initial rune. +// +// Special case: when the initial part is a recognized as an initialism (like in the example above), +// the full part is lower-cased. +func (m NameMangler) ToVarName(name string) string { + return m.goIdentifier(name, false) +} + +// ToGoName generates a legit exported go variable name from a sentence. +// +// The generated name plays well with most linters. +// +// ToGoName abides by the go "exported" symbol rule starting with an upper-case letter. +// +// Examples: +// +// - "hello_swagger" becomes "HelloSwagger" +// - "Http_server" becomes "HTTPServer" +// +// # Edge cases +// +// Whenever the first rune is not eligible to upper case, a special prefix is prepended to the resulting name. +// By default this is simply "X" and you may customize this behavior using the [WithGoNamePrefixFunc] option. +// +// This happens when the first rune is not a letter, e.g. a digit, or a symbol that has no word transliteration +// (see also [WithReplaceFunc] about symbol transliterations), +// as well as for most East Asian or Devanagari runes, for which there is no such concept as upper-case. +// +// # Linting +// +// [revive], the successor of golint is the reference linter. +// +// This means that [NameMangler.ToGoName] supports the initialisms that revive checks (see also [DefaultInitialisms]). +// +// At this moment, there is no attempt to transliterate unicode into ascii, meaning that some linters +// (e.g. asciicheck, gosmopolitan) may croak on go identifiers generated from unicode input. 
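And likewise for the identifier-oriented helpers above (a sketch; outputs as given in the doc comments):

    m := mangling.NewNameMangler()
    m.ToJSONName("Hello_swagger") // "helloSwagger"
    m.ToVarName("Http_server")    // "httpServer" (a leading initialism is lower-cased as a whole)
    m.ToGoName("hello_swagger")   // "HelloSwagger"
    m.ToGoName("Http_server")     // "HTTPServer"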
+// +// [revive]: https://github.com/mgechev/revive +func (m NameMangler) ToGoName(name string) string { + return m.goIdentifier(name, true) +} + +func (m NameMangler) goIdentifier(name string, exported bool) string { + s := m.splitterWithPostSplit + lexems := s.split(name) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems + + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() + + firstPart := lexemes[0] + if !exported { + if ok := firstPart.WriteLower(result, true); !ok { + // NOTE: an initialism as the first part is lower-cased: no longer generates stuff like hTTPxyz. + // + // same prefixing rule applied to unexported variable as to an exported one, so that we have consistent + // names, whether the generated identifier is exported or not. + result.WriteString(strings.ToLower(m.prefixFunc()(name))) + result.WriteString(lexemes[0].GetOriginal()) + } + } else { + if ok := firstPart.WriteTitleized(result, true); !ok { + // "repairs" a lexeme that doesn't start with a letter to become + // the start a legit go name. The current strategy is very crude and simply adds a fixed prefix, + // e.g. "X". + // For instance "1_sesame_street" would be split into lexemes ["1", "sesame", "street"] and + // the first one ("1") would result in something like "X1" (with the default prefix function). + // + // NOTE: no longer forcing the first part to be fully upper-cased + result.WriteString(m.prefixFunc()(name)) + result.WriteString(lexemes[0].GetOriginal()) + } + } + + for _, lexem := range lexemes[1:] { + // NOTE: no longer forcing initialism parts to be fully upper-cased: + // * pluralized initialism preserve their trailing "s" + // * mixed-cased initialisms, such as IPv4, are preserved + if ok := lexem.WriteTitleized(result, false); !ok { + // it's not titleized: perhaps it's too short, perhaps the first rune is not a letter. + // write anyway + result.WriteString(lexem.GetOriginal()) + } + } + + return result.String() +} + +func (m *NameMangler) addInitialisms(words ...string) { + m.index.add(words...) + m.index.buildCache() +} + +// split calls the inner splitter. +func (m NameMangler) split(str string) *[]string { + s := m.splitter + lexems := s.split(str) + result := poolOfStrings.BorrowStrings() + + for _, lexem := range *lexems { + *result = append(*result, lexem.GetOriginal()) + } + poolOfLexems.RedeemLexems(lexems) + + return result +} diff --git a/vendor/github.com/go-openapi/swag/mangling/options.go b/vendor/github.com/go-openapi/swag/mangling/options.go new file mode 100644 index 0000000000..66ad2e46c7 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/options.go @@ -0,0 +1,161 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mangling + +type ( + // PrefixFunc defines a safeguard rule (that may depend on the input string), to prefix + // a generated go name (in [NameMangler.ToGoName] and [NameMangler.ToVarName]). + // + // See [NameMangler.ToGoName] for more about which edge cases the prefix function covers. + PrefixFunc func(string) string + + // ReplaceFunc is a transliteration function to replace special runes by a word. + ReplaceFunc func(r rune) (string, bool) + + // Option to configure a [NameMangler]. + Option func(*options) + + options struct { + commonInitialisms []string + + goNamePrefixFunc PrefixFunc + goNamePrefixFuncPtr *PrefixFunc + replaceFunc func(r rune) (string, bool) + } +) + +func (o *options) prefixFunc() PrefixFunc { + if o.goNamePrefixFuncPtr != nil && *o.goNamePrefixFuncPtr != nil { + return *o.goNamePrefixFuncPtr + } + + return o.goNamePrefixFunc +} + +// WithGoNamePrefixFunc overrides the default prefix rule to safeguard generated go names. +// +// Example: +// +// This helps convert "123" into "{prefix}123" (a very crude strategy indeed, but it works). +// +// See [github.com/go-swagger/go-swagger/generator.DefaultFuncMap] for an example. +// +// The prefix function is assumed to return a string that starts with an upper case letter. +// +// The default is to prefix with "X". +// +// See [NameMangler.ToGoName] for more about which edge cases the prefix function covers. +func WithGoNamePrefixFunc(fn PrefixFunc) Option { + return func(o *options) { + o.goNamePrefixFunc = fn + } +} + +// WithGoNamePrefixFuncPtr is like [WithGoNamePrefixFunc] but it specifies a pointer to a function. +// +// [WithGoNamePrefixFunc] should be preferred in most situations. This option should only serve the +// purpose of handling special situations where the prefix function is not an internal variable +// (e.g. an exported package global). +// +// [WithGoNamePrefixFuncPtr] supersedes [WithGoNamePrefixFunc] if it also specified. +// +// If the provided pointer is nil or points to a nil value, this option has no effect. +// +// The caller should ensure that no undesirable concurrent changes are applied to the function pointed to. +func WithGoNamePrefixFuncPtr(ptr *PrefixFunc) Option { + return func(o *options) { + o.goNamePrefixFuncPtr = ptr + } +} + +// WithInitialisms declares the initialisms this mangler supports. +// +// This supersedes any pre-loaded defaults (see [DefaultInitialisms] for more about what initialisms are). +// +// It declares words to be recognized as "initialisms" (i.e. words that won't be camel cased or titled cased). +// +// Words must start with a (unicode) letter. If some don't, they are ignored. +// Words are either fully capitalized or mixed-cased. Lower-case only words are considered capitalized. +func WithInitialisms(words ...string) Option { + return func(o *options) { + o.commonInitialisms = words + } +} + +// WithAdditionalInitialisms adds new initialisms to the currently supported list (see [DefaultInitialisms]). +// +// The same sanitization rules apply as those described for [WithInitialisms]. +func WithAdditionalInitialisms(words ...string) Option { + return func(o *options) { + o.commonInitialisms = append(o.commonInitialisms, words...) + } +} + +// WithReplaceFunc specifies a custom transliteration function instead of the default. +// +// The default translates the following characters into words as follows: +// +// - '@' -> 'At' +// - '&' -> 'And' +// - '|' -> 'Pipe' +// - '$' -> 'Dollar' +// - '!' 
-> 'Bang' +// +// Notice that the outcome of a transliteration should always be titleized. +func WithReplaceFunc(fn ReplaceFunc) Option { + return func(o *options) { + o.replaceFunc = fn + } +} + +func defaultPrefixFunc(_ string) string { + return "X" +} + +// defaultReplaceTable finds a word representation for special characters. +func defaultReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +func optionsWithDefaults(opts []Option) options { + o := options{ + commonInitialisms: DefaultInitialisms(), + goNamePrefixFunc: defaultPrefixFunc, + replaceFunc: defaultReplaceTable, + } + + for _, apply := range opts { + apply(&o) + } + + return o +} diff --git a/vendor/github.com/go-openapi/swag/mangling/pools.go b/vendor/github.com/go-openapi/swag/mangling/pools.go new file mode 100644 index 0000000000..d85b403877 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/pools.go @@ -0,0 +1,134 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mangling + +import ( + "bytes" + "sync" +) + +const maxAllocMatches = 8 + +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. 
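Going back to the options defined in options.go above (WithAdditionalInitialisms, WithGoNamePrefixFunc, WithReplaceFunc), a minimal configuration sketch. The extra initialism "K8s" and the "N" prefix are arbitrary illustrations, not defaults:

    m := mangling.NewNameMangler(
        mangling.WithAdditionalInitialisms("K8s"), // extend the default initialism list
        mangling.WithGoNamePrefixFunc(func(string) string {
            return "N" // used instead of the default "X" when a name cannot start a go identifier
        }),
    )
    _ = m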
+ + matchesPool struct { + *sync.Pool + } + + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + stringsPool struct { + *sync.Pool + } +) + +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfStrings = stringsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]string, 0, maxAllocMatches) + + return &s + }, + }, + } +) + +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) + } + + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p stringsPool) BorrowStrings() *[]string { + s := p.Get().(*[]string) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p stringsPool) RedeemStrings(s *[]string) { + p.Put(s) +} diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/mangling/split.go similarity index 52% rename from vendor/github.com/go-openapi/swag/split.go rename to vendor/github.com/go-openapi/swag/mangling/split.go index 274727a866..40e4a2e0e1 100644 --- a/vendor/github.com/go-openapi/swag/split.go +++ b/vendor/github.com/go-openapi/swag/mangling/split.go @@ -12,212 +12,66 @@ // See the License for the specific language governing permissions and // limitations under the License. -package swag +package mangling import ( - "bytes" - "sync" "unicode" - "unicode/utf8" ) -type ( - splitter struct { - initialisms []string - initialismsRunes [][]rune - initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version - postSplitInitialismCheck bool +type splitterOption func(*splitter) + +// withPostSplitInitialismCheck allows to catch initialisms after main split process +func withPostSplitInitialismCheck(s *splitter) { + s.postSplitInitialismCheck = true +} + +func withReplaceFunc(fn ReplaceFunc) func(*splitter) { + return func(s *splitter) { + s.replaceFunc = fn } +} - splitterOption func(*splitter) +func withInitialismsCache(c *initialismsCache) splitterOption { + return func(s *splitter) { + s.initialismsCache = c + } +} +type ( initialismMatch struct { body []rune start, end int complete bool + hasPlural pluralForm } initialismMatches []initialismMatch ) -type ( - // memory pools of temporary objects. - // - // These are used to recycle temporarily allocated objects - // and relieve the GC from undue pressure. 
- - matchesPool struct { - *sync.Pool - } - - buffersPool struct { - *sync.Pool - } - - lexemsPool struct { - *sync.Pool - } - - splittersPool struct { - *sync.Pool - } -) - -var ( - // poolOfMatches holds temporary slices for recycling during the initialism match process - poolOfMatches = matchesPool{ - Pool: &sync.Pool{ - New: func() any { - s := make(initialismMatches, 0, maxAllocMatches) - - return &s - }, - }, - } - - poolOfBuffers = buffersPool{ - Pool: &sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } - - poolOfLexems = lexemsPool{ - Pool: &sync.Pool{ - New: func() any { - s := make([]nameLexem, 0, maxAllocMatches) - - return &s - }, - }, - } - - poolOfSplitters = splittersPool{ - Pool: &sync.Pool{ - New: func() any { - s := newSplitter() - - return &s - }, - }, - } -) - -// nameReplaceTable finds a word representation for special characters. -func nameReplaceTable(r rune) (string, bool) { - switch r { - case '@': - return "At ", true - case '&': - return "And ", true - case '|': - return "Pipe ", true - case '$': - return "Dollar ", true - case '!': - return "Bang ", true - case '-': - return "", true - case '_': - return "", true - default: - return "", false - } +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 } -// split calls the splitter. -// -// Use newSplitter for more control and options -func split(str string) []string { - s := poolOfSplitters.BorrowSplitter() - lexems := s.split(str) - result := make([]string, 0, len(*lexems)) - - for _, lexem := range *lexems { - result = append(result, lexem.GetOriginal()) - } - poolOfLexems.RedeemLexems(lexems) - poolOfSplitters.RedeemSplitter(s) - - return result +type splitter struct { + *initialismsCache + postSplitInitialismCheck bool + replaceFunc ReplaceFunc } func newSplitter(options ...splitterOption) splitter { - s := splitter{ - postSplitInitialismCheck: false, - initialisms: initialisms, - initialismsRunes: initialismsRunes, - initialismsUpperCased: initialismsUpperCased, - } + var s splitter for _, option := range options { option(&s) } - return s -} - -// withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) { - s.postSplitInitialismCheck = true -} - -func (p matchesPool) BorrowMatches() *initialismMatches { - s := p.Get().(*initialismMatches) - *s = (*s)[:0] // reset slice, keep allocated capacity - - return s -} - -func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { - s := p.Get().(*bytes.Buffer) - s.Reset() - - if s.Cap() < size { - s.Grow(size) - } - - return s -} - -func (p lexemsPool) BorrowLexems() *[]nameLexem { - s := p.Get().(*[]nameLexem) - *s = (*s)[:0] // reset slice, keep allocated capacity - - return s -} - -func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { - s := p.Get().(*splitter) - s.postSplitInitialismCheck = false // reset options - for _, apply := range options { - apply(s) + if s.replaceFunc == nil { + s.replaceFunc = defaultReplaceTable } return s } -func (p matchesPool) RedeemMatches(s *initialismMatches) { - p.Put(s) -} - -func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { - p.Put(s) -} - -func (p lexemsPool) RedeemLexems(s *[]nameLexem) { - p.Put(s) -} - -func (p splittersPool) RedeemSplitter(s *splitter) { - p.Put(s) -} - -func (m initialismMatch) isZero() bool { - return m.start == 0 && m.end == 0 -} - func (s splitter) split(name string) *[]nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) @@ 
-242,25 +96,68 @@ func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { for _, match := range *matches { if keepCompleteMatch := match.complete; keepCompleteMatch { *newMatches = append(*newMatches, match) + + // the match is complete: keep it then move on to next rune continue } - // drop failed match currentMatchRune := match.body[currentRunePosition-match.start] if currentMatchRune != currentRune { + // failed match, move on to next rune continue } // try to complete ongoing match if currentRunePosition-match.start == len(match.body)-1 { // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word + // if it is a lowercase letter, then it is not the end of match + // but the beginning of the next word. + // + // NOTE(fredbi): this heuristic sometimes leads to counterintuitive splits and + // perhaps (not sure yet) we should check against case _alternance_. + // + // Example: + // + // In the current version, in the sentence "IDS initialism", "ID" is recognized as an initialism, + // leading to a split like "id_s_initialism" (or IDSInitialism), + // whereas in the sentence "IDx initialism", it is not and produces something like + // "i_d_x_initialism" (or IDxInitialism). The generated file name is not great. + // + // Both go identifiers are tolerated by linters. + // + // Notice that the slightly different input "IDs initialism" is correctly detected + // as a pluralized initialism and produces something like "ids_initialism" (or IDsInitialism). if currentRunePosition < len(nameRunes)-1 { nextRune := nameRunes[currentRunePosition+1] + + // recognize a plural form for this initialism (only simple pluralization is supported) + if nextRune == 's' && match.hasPlural == simplePlural { + // detected a pluralized initialism + match.body = append(match.body, nextRune) + currentRunePosition++ + if currentRunePosition < len(nameRunes)-1 { + nextRune = nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // it is the start of a new word. + // Match is only partial and the initialism is not recognized : move on + continue + } + } + + // this is a pluralized match: keep it + match.complete = true + match.hasPlural = simplePlural + match.end = currentRunePosition + *newMatches = append(*newMatches, match) + + // match is complete: keep it then move on to next rune + continue + } + if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word + // it is the start of a new word + // Match is only partial and the initialism is not recognized : move on continue } } @@ -269,18 +166,19 @@ func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { match.end = currentRunePosition } + // append the ongoing matching attempt (not necessarily complete) *newMatches = append(*newMatches, match) } } // check for new initialism matches - for i := range s.initialisms { - initialismRunes := s.initialismsRunes[i] - if initialismRunes[0] == currentRune { + for i, r := range s.initialismsRunes { + if r[0] == currentRune { *newMatches = append(*newMatches, initialismMatch{ - start: currentRunePosition, - body: initialismRunes, - complete: false, + start: currentRunePosition, + body: r, + complete: false, + hasPlural: s.initialismsPluralForm[i], }) } } @@ -373,8 +271,10 @@ func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune addNameLexem = addCasualNameLexem } + // NOTE: (performance). 
The few remaining non-amortized allocations + // lay in the code below: using String() forces for _, rn := range str { - if replace, found := nameReplaceTable(rn); found { + if replace, found := s.replaceFunc(rn); found { if currentSegment.Len() > 0 { addNameLexem(currentSegment.String()) currentSegment.Reset() @@ -410,99 +310,3 @@ func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune addNameLexem(currentSegment.String()) } } - -// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but -// it ignores leading and trailing blank spaces in the compared -// string. -// -// base is assumed to be composed of upper-cased runes, and be already -// trimmed. -// -// This code is heavily inspired from strings.EqualFold. -func isEqualFoldIgnoreSpace(base []rune, str string) bool { - var i, baseIndex int - // equivalent to b := []byte(str), but without data copy - b := hackStringBytes(str) - - for i < len(b) { - if c := b[i]; c < utf8.RuneSelf { - // fast path for ASCII - if c != ' ' && c != '\t' { - break - } - i++ - - continue - } - - // unicode case - r, size := utf8.DecodeRune(b[i:]) - if !unicode.IsSpace(r) { - break - } - i += size - } - - if i >= len(b) { - return len(base) == 0 - } - - for _, baseRune := range base { - if i >= len(b) { - break - } - - if c := b[i]; c < utf8.RuneSelf { - // single byte rune case (ASCII) - if baseRune >= utf8.RuneSelf { - return false - } - - baseChar := byte(baseRune) - if c != baseChar && - !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { - return false - } - - baseIndex++ - i++ - - continue - } - - // unicode case - r, size := utf8.DecodeRune(b[i:]) - if unicode.ToUpper(r) != baseRune { - return false - } - baseIndex++ - i += size - } - - if baseIndex != len(base) { - return false - } - - // all passed: now we should only have blanks - for i < len(b) { - if c := b[i]; c < utf8.RuneSelf { - // fast path for ASCII - if c != ' ' && c != '\t' { - return false - } - i++ - - continue - } - - // unicode case - r, size := utf8.DecodeRune(b[i:]) - if !unicode.IsSpace(r) { - return false - } - - i += size - } - - return true -} diff --git a/vendor/github.com/go-openapi/swag/mangling/string_bytes.go b/vendor/github.com/go-openapi/swag/mangling/string_bytes.go new file mode 100644 index 0000000000..06351434d3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/string_bytes.go @@ -0,0 +1,22 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mangling + +import "unsafe" + +// hackStringBytes returns the (unsafe) underlying bytes slice of a string. 
+func hackStringBytes(str string) []byte { + return unsafe.Slice(unsafe.StringData(str), len(str)) +} diff --git a/vendor/github.com/go-openapi/swag/mangling/util.go b/vendor/github.com/go-openapi/swag/mangling/util.go new file mode 100644 index 0000000000..c289dc6bdd --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/util.go @@ -0,0 +1,129 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mangling + +import ( + "strings" + "unicode" + "unicode/utf8" +) + +// Removes leading whitespaces +func trim(str string) string { return strings.TrimSpace(str) } + +// upper is strings.ToUpper() combined with trim +func upper(str string) string { + return strings.ToUpper(trim(str)) +} + +// lower is strings.ToLower() combined with trim +func lower(str string) string { + return strings.ToLower(trim(str)) +} + +// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but +// it ignores leading and trailing blank spaces in the compared +// string. +// +// base is assumed to be composed of upper-cased runes, and be already +// trimmed. +// +// This code is heavily inspired from strings.EqualFold. +func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && ((c < 'a') || (c > 'z') || (c-'a'+'A' != baseChar)) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } + + i += size + } + + return true +} diff --git a/vendor/github.com/go-openapi/swag/mangling_iface.go b/vendor/github.com/go-openapi/swag/mangling_iface.go new file mode 100644 index 0000000000..2d0d07ddb6 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling_iface.go @@ -0,0 +1,80 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "github.com/go-openapi/swag/mangling" + +// GoNamePrefixFunc sets an optional rule to prefix go names +// which do not start with a letter. +// +// GoNamePrefixFunc should not be written to while concurrently using the other mangling functions of this package. +// +// Deprecated: use [mangling.WithGoNamePrefixFunc] instead. +var GoNamePrefixFunc mangling.PrefixFunc + +// swagNameMangler is a global instance of the name mangler specifically alloted +// to support deprecated functions. +var swagNameMangler = mangling.NewNameMangler( + mangling.WithGoNamePrefixFuncPtr(&GoNamePrefixFunc), +) + +// AddInitialisms adds additional initialisms to the default list (see [mangling.DefaultInitialisms]). +// +// AddInitialisms is not safe to be called concurrently. +// +// Deprecated: use [mangling.WithAdditionalInitialisms] instead. +func AddInitialisms(words ...string) { + swagNameMangler.AddInitialisms(words...) +} + +// Camelize a single word. +// +// Deprecated: use [mangling.NameMangler.Camelize] instead. +func Camelize(word string) string { return swagNameMangler.Camelize(word) } + +// ToFileName lowercases and underscores a go type name. +// +// Deprecated: use [mangling.NameMangler.ToFileName] instead. +func ToFileName(name string) string { return swagNameMangler.ToFileName(name) } + +// ToCommandName lowercases and underscores a go type name. +// +// Deprecated: use [mangling.NameMangler.ToCommandName] instead. +func ToCommandName(name string) string { return swagNameMangler.ToCommandName(name) } + +// ToHumanNameLower represents a code name as a human series of words. +// +// Deprecated: use [mangling.NameMangler.ToHumanNameLower] instead. +func ToHumanNameLower(name string) string { return swagNameMangler.ToHumanNameLower(name) } + +// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized. +// +// Deprecated: use [mangling.NameMangler.ToHumanNameTitle] instead. +func ToHumanNameTitle(name string) string { return swagNameMangler.ToHumanNameTitle(name) } + +// ToJSONName camel-cases a name which can be underscored or pascal-cased. +// +// Deprecated: use [mangling.NameMangler.ToJSONName] instead. +func ToJSONName(name string) string { return swagNameMangler.ToJSONName(name) } + +// ToVarName camel-cases a name which can be underscored or pascal-cased. +// +// Deprecated: use [mangling.NameMangler.ToVarName] instead. +func ToVarName(name string) string { return swagNameMangler.ToVarName(name) } + +// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes. +// +// Deprecated: use [mangling.NameMangler.ToGoName] instead. 
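A before/after sketch of the migration these deprecation notices describe (illustrative only):

    // Before: deprecated package-level helpers.
    _ = swag.ToGoName("hello_swagger") // "HelloSwagger"

    // After: an explicit mangler instance from the new mangling package.
    m := mangling.NewNameMangler()
    _ = m.ToGoName("hello_swagger") // "HelloSwagger"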
+func ToGoName(name string) string { return swagNameMangler.ToGoName(name) } diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go deleted file mode 100644 index 8bb64ac32f..0000000000 --- a/vendor/github.com/go-openapi/swag/name_lexem.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "unicode" - "unicode/utf8" -) - -type ( - lexemKind uint8 - - nameLexem struct { - original string - matchedInitialism string - kind lexemKind - } -) - -const ( - lexemKindCasualName lexemKind = iota - lexemKindInitialismName -) - -func newInitialismNameLexem(original, matchedInitialism string) nameLexem { - return nameLexem{ - kind: lexemKindInitialismName, - original: original, - matchedInitialism: matchedInitialism, - } -} - -func newCasualNameLexem(original string) nameLexem { - return nameLexem{ - kind: lexemKindCasualName, - original: original, - } -} - -func (l nameLexem) GetUnsafeGoName() string { - if l.kind == lexemKindInitialismName { - return l.matchedInitialism - } - - var ( - first rune - rest string - ) - - for i, orig := range l.original { - if i == 0 { - first = orig - continue - } - - if i > 0 { - rest = l.original[i:] - break - } - } - - if len(l.original) > 1 { - b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) - defer func() { - poolOfBuffers.RedeemBuffer(b) - }() - b.WriteRune(unicode.ToUpper(first)) - b.WriteString(lower(rest)) - return b.String() - } - - return l.original -} - -func (l nameLexem) GetOriginal() string { - return l.original -} - -func (l nameLexem) IsInitialism() bool { - return l.kind == lexemKindInitialismName -} diff --git a/vendor/github.com/go-openapi/swag/netutils/LICENSE b/vendor/github.com/go-openapi/swag/netutils/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/netutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/swag/netutils/doc.go b/vendor/github.com/go-openapi/swag/netutils/doc.go new file mode 100644 index 0000000000..ed6d8a022b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/netutils/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package netutils provides helpers for network-related tasks. +package netutils diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/netutils/net.go similarity index 85% rename from vendor/github.com/go-openapi/swag/net.go rename to vendor/github.com/go-openapi/swag/netutils/net.go index 821235f84d..3d0182fc5a 100644 --- a/vendor/github.com/go-openapi/swag/net.go +++ b/vendor/github.com/go-openapi/swag/netutils/net.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package swag +package netutils import ( "net" @@ -20,7 +20,10 @@ import ( ) // SplitHostPort splits a network address into a host and a port. -// The port is -1 when there is no port to be found +// +// The difference with the standard net.SplitHostPort is that the port is converted to an int. +// +// The port is -1 when there is no port to be found. func SplitHostPort(addr string) (host string, port int, err error) { h, p, err := net.SplitHostPort(addr) if err != nil { @@ -34,5 +37,6 @@ func SplitHostPort(addr string) (host string, port int, err error) { if err != nil { return "", -1, err } + return h, pi, nil } diff --git a/vendor/github.com/go-openapi/swag/netutils_iface.go b/vendor/github.com/go-openapi/swag/netutils_iface.go new file mode 100644 index 0000000000..537314e364 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/netutils_iface.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "github.com/go-openapi/swag/netutils" + +// SplitHostPort splits a network address into a host and a port. +// +// Deprecated: use [netutils.SplitHostPort] instead. 
+func SplitHostPort(addr string) (host string, port int, err error) { + return netutils.SplitHostPort(addr) +} diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go deleted file mode 100644 index 90745d5ca9..0000000000 --- a/vendor/github.com/go-openapi/swag/string_bytes.go +++ /dev/null @@ -1,8 +0,0 @@ -package swag - -import "unsafe" - -// hackStringBytes returns the (unsafe) underlying bytes slice of a string. -func hackStringBytes(str string) []byte { - return unsafe.Slice(unsafe.StringData(str), len(str)) -} diff --git a/vendor/github.com/go-openapi/swag/stringutils/LICENSE b/vendor/github.com/go-openapi/swag/stringutils/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/stringutils/collection_formats.go b/vendor/github.com/go-openapi/swag/stringutils/collection_formats.go new file mode 100644 index 0000000000..1ff96dcbd8 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils/collection_formats.go @@ -0,0 +1,85 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stringutils + +import "strings" + +const ( + // collectionFormatComma = "csv" + collectionFormatSpace = "ssv" + collectionFormatTab = "tsv" + collectionFormatPipe = "pipes" + collectionFormatMulti = "multi" + + collectionFormatDefaultSep = "," +) + +// JoinByFormat joins a string array by a known format (e.g. 
swagger's collectionFormat attribute): +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func JoinByFormat(data []string, format string) []string { + if len(data) == 0 { + return data + } + var sep string + switch format { + case collectionFormatSpace: + sep = " " + case collectionFormatTab: + sep = "\t" + case collectionFormatPipe: + sep = "|" + case collectionFormatMulti: + return data + default: + sep = collectionFormatDefaultSep + } + return []string{strings.Join(data, sep)} +} + +// SplitByFormat splits a string by a known format: +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func SplitByFormat(data, format string) []string { + if data == "" { + return nil + } + var sep string + switch format { + case collectionFormatSpace: + sep = " " + case collectionFormatTab: + sep = "\t" + case collectionFormatPipe: + sep = "|" + case collectionFormatMulti: + return nil + default: + sep = collectionFormatDefaultSep + } + var result []string + for _, s := range strings.Split(data, sep) { + if ts := strings.TrimSpace(s); ts != "" { + result = append(result, ts) + } + } + return result +} diff --git a/vendor/github.com/go-openapi/swag/stringutils/doc.go b/vendor/github.com/go-openapi/swag/stringutils/doc.go new file mode 100644 index 0000000000..b5d18e517d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package stringutils exposes helpers to search and process strings. +package stringutils diff --git a/vendor/github.com/go-openapi/swag/stringutils/strings.go b/vendor/github.com/go-openapi/swag/stringutils/strings.go new file mode 100644 index 0000000000..086592317f --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils/strings.go @@ -0,0 +1,34 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stringutils + +import ( + "slices" + "strings" +) + +// ContainsStrings searches a slice of strings for a case-sensitive match +// +// Now equivalent to the standard library [slice.Contains]. 
+func ContainsStrings(coll []string, item string) bool { + return slices.Contains(coll, item) +} + +// ContainsStringsCI searches a slice of strings for a case-insensitive match +func ContainsStringsCI(coll []string, item string) bool { + return slices.ContainsFunc(coll, func(e string) bool { + return strings.EqualFold(e, item) + }) +} diff --git a/vendor/github.com/go-openapi/swag/stringutils_iface.go b/vendor/github.com/go-openapi/swag/stringutils_iface.go new file mode 100644 index 0000000000..00d7e02125 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils_iface.go @@ -0,0 +1,45 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "github.com/go-openapi/swag/stringutils" + +// ContainsStrings searches a slice of strings for a case-sensitive match. +// +// Deprecated: use [slices.Contains] or [stringutils.ContainsStrings] instead. +func ContainsStrings(coll []string, item string) bool { + return stringutils.ContainsStrings(coll, item) +} + +// ContainsStringsCI searches a slice of strings for a case-insensitive match. +// +// Deprecated: use [stringutils.ContainsStringsCI] instead. +func ContainsStringsCI(coll []string, item string) bool { + return stringutils.ContainsStringsCI(coll, item) +} + +// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute). +// +// Deprecated: use [stringutils.JoinByFormat] instead. +func JoinByFormat(data []string, format string) []string { + return stringutils.JoinByFormat(data, format) +} + +// SplitByFormat splits a string by a known format. +// +// Deprecated: use [stringutils.SplitByFormat] instead. +func SplitByFormat(data, format string) []string { + return stringutils.SplitByFormat(data, format) +} diff --git a/vendor/github.com/go-openapi/swag/typeutils/LICENSE b/vendor/github.com/go-openapi/swag/typeutils/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/typeutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/swag/typeutils/doc.go b/vendor/github.com/go-openapi/swag/typeutils/doc.go new file mode 100644 index 0000000000..67e49d12ec --- /dev/null +++ b/vendor/github.com/go-openapi/swag/typeutils/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package typeutils exposes utilities to inspect generic types. +package typeutils diff --git a/vendor/github.com/go-openapi/swag/typeutils/types.go b/vendor/github.com/go-openapi/swag/typeutils/types.go new file mode 100644 index 0000000000..f0ddd3cd3d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/typeutils/types.go @@ -0,0 +1,91 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package typeutils + +import "reflect" + +type zeroable interface { + IsZero() bool +} + +// IsZero returns true when the value passed into the function is a zero value. +// This allows for safer checking of interface values. +func IsZero(data any) bool { + v := reflect.ValueOf(data) + // check for nil data + switch v.Kind() { //nolint:exhaustive + case + reflect.Interface, + reflect.Func, + reflect.Chan, + reflect.Pointer, + reflect.UnsafePointer, + reflect.Map, + reflect.Slice: + if v.IsNil() { + return true + } + } + + // check for things that have an IsZero method instead + if vv, ok := data.(zeroable); ok { + return vv.IsZero() + } + + // continue with slightly more complex reflection + switch v.Kind() { //nolint:exhaustive + case reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Struct, reflect.Array: + return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) + case reflect.Invalid: + return true + default: + return false + } +} + +// IsNil checks if input is nil. +// +// For types chan, func, interface, map, pointer, or slice it returns true if its argument is nil. +// +// See [reflect.Value.IsNil]. 
+func IsNil(input any) bool { + if input == nil { + return true + } + + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Pointer, + reflect.UnsafePointer, + reflect.Map, + reflect.Slice, + reflect.Chan, + reflect.Interface, + reflect.Func: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} diff --git a/vendor/github.com/go-openapi/swag/typeutils_iface.go b/vendor/github.com/go-openapi/swag/typeutils_iface.go new file mode 100644 index 0000000000..b104a8040a --- /dev/null +++ b/vendor/github.com/go-openapi/swag/typeutils_iface.go @@ -0,0 +1,23 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "github.com/go-openapi/swag/typeutils" + +// IsZero returns true when the value passed into the function is a zero value. +// This allows for safer checking of interface values. +// +// Deprecated: use [typeutils.IsZero] instead. +func IsZero(data interface{}) bool { return typeutils.IsZero(data) } diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go deleted file mode 100644 index 5051401c49..0000000000 --- a/vendor/github.com/go-openapi/swag/util.go +++ /dev/null @@ -1,364 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -// GoNamePrefixFunc sets an optional rule to prefix go names -// which do not start with a letter. -// -// The prefix function is assumed to return a string that starts with an upper case letter. -// -// e.g. to help convert "123" into "{prefix}123" -// -// The default is to prefix with "X" -var GoNamePrefixFunc func(string) string - -func prefixFunc(name, in string) string { - if GoNamePrefixFunc == nil { - return "X" + in - } - - return GoNamePrefixFunc(name) + in -} - -const ( - // collectionFormatComma = "csv" - collectionFormatSpace = "ssv" - collectionFormatTab = "tsv" - collectionFormatPipe = "pipes" - collectionFormatMulti = "multi" -) - -// JoinByFormat joins a string array by a known format (e.g. 
swagger's collectionFormat attribute): -// -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) -func JoinByFormat(data []string, format string) []string { - if len(data) == 0 { - return data - } - var sep string - switch format { - case collectionFormatSpace: - sep = " " - case collectionFormatTab: - sep = "\t" - case collectionFormatPipe: - sep = "|" - case collectionFormatMulti: - return data - default: - sep = "," - } - return []string{strings.Join(data, sep)} -} - -// SplitByFormat splits a string by a known format: -// -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) -func SplitByFormat(data, format string) []string { - if data == "" { - return nil - } - var sep string - switch format { - case collectionFormatSpace: - sep = " " - case collectionFormatTab: - sep = "\t" - case collectionFormatPipe: - sep = "|" - case collectionFormatMulti: - return nil - default: - sep = "," - } - var result []string - for _, s := range strings.Split(data, sep) { - if ts := strings.TrimSpace(s); ts != "" { - result = append(result, ts) - } - } - return result -} - -// Removes leading whitespaces -func trim(str string) string { - return strings.TrimSpace(str) -} - -// Shortcut to strings.ToUpper() -func upper(str string) string { - return strings.ToUpper(trim(str)) -} - -// Shortcut to strings.ToLower() -func lower(str string) string { - return strings.ToLower(trim(str)) -} - -// Camelize an uppercased word -func Camelize(word string) string { - camelized := poolOfBuffers.BorrowBuffer(len(word)) - defer func() { - poolOfBuffers.RedeemBuffer(camelized) - }() - - for pos, ru := range []rune(word) { - if pos > 0 { - camelized.WriteRune(unicode.ToLower(ru)) - } else { - camelized.WriteRune(unicode.ToUpper(ru)) - } - } - return camelized.String() -} - -// ToFileName lowercases and underscores a go type name -func ToFileName(name string) string { - in := split(name) - out := make([]string, 0, len(in)) - - for _, w := range in { - out = append(out, lower(w)) - } - - return strings.Join(out, "_") -} - -// ToCommandName lowercases and underscores a go type name -func ToCommandName(name string) string { - in := split(name) - out := make([]string, 0, len(in)) - - for _, w := range in { - out = append(out, lower(w)) - } - return strings.Join(out, "-") -} - -// ToHumanNameLower represents a code name as a human series of words -func ToHumanNameLower(name string) string { - s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) - in := s.split(name) - poolOfSplitters.RedeemSplitter(s) - out := make([]string, 0, len(*in)) - - for _, w := range *in { - if !w.IsInitialism() { - out = append(out, lower(w.GetOriginal())) - } else { - out = append(out, trim(w.GetOriginal())) - } - } - poolOfLexems.RedeemLexems(in) - - return strings.Join(out, " ") -} - -// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized -func ToHumanNameTitle(name string) string { - s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) - in := s.split(name) - poolOfSplitters.RedeemSplitter(s) - - out := make([]string, 0, len(*in)) - for _, w := range *in { - original := trim(w.GetOriginal()) - if !w.IsInitialism() { - out = append(out, Camelize(original)) - } else { - out = append(out, original) - } - } - poolOfLexems.RedeemLexems(in) - - return strings.Join(out, " ") -} - -// ToJSONName camelcases a name which 
can be underscored or pascal cased -func ToJSONName(name string) string { - in := split(name) - out := make([]string, 0, len(in)) - - for i, w := range in { - if i == 0 { - out = append(out, lower(w)) - continue - } - out = append(out, Camelize(trim(w))) - } - return strings.Join(out, "") -} - -// ToVarName camelcases a name which can be underscored or pascal cased -func ToVarName(name string) string { - res := ToGoName(name) - if isInitialism(res) { - return lower(res) - } - if len(res) <= 1 { - return lower(res) - } - return lower(res[:1]) + res[1:] -} - -// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes -func ToGoName(name string) string { - s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) - lexems := s.split(name) - poolOfSplitters.RedeemSplitter(s) - defer func() { - poolOfLexems.RedeemLexems(lexems) - }() - lexemes := *lexems - - if len(lexemes) == 0 { - return "" - } - - result := poolOfBuffers.BorrowBuffer(len(name)) - defer func() { - poolOfBuffers.RedeemBuffer(result) - }() - - // check if not starting with a letter, upper case - firstPart := lexemes[0].GetUnsafeGoName() - if lexemes[0].IsInitialism() { - firstPart = upper(firstPart) - } - - if c := firstPart[0]; c < utf8.RuneSelf { - // ASCII - switch { - case 'A' <= c && c <= 'Z': - result.WriteString(firstPart) - case 'a' <= c && c <= 'z': - result.WriteByte(c - 'a' + 'A') - result.WriteString(firstPart[1:]) - default: - result.WriteString(prefixFunc(name, firstPart)) - // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: - // assume this is always the case - } - } else { - // unicode - firstRune, _ := utf8.DecodeRuneInString(firstPart) - switch { - case !unicode.IsLetter(firstRune): - result.WriteString(prefixFunc(name, firstPart)) - case !unicode.IsUpper(firstRune): - result.WriteString(prefixFunc(name, firstPart)) - /* - result.WriteRune(unicode.ToUpper(firstRune)) - result.WriteString(firstPart[offset:]) - */ - default: - result.WriteString(firstPart) - } - } - - for _, lexem := range lexemes[1:] { - goName := lexem.GetUnsafeGoName() - - // to support old behavior - if lexem.IsInitialism() { - goName = upper(goName) - } - result.WriteString(goName) - } - - return result.String() -} - -// ContainsStrings searches a slice of strings for a case-sensitive match -func ContainsStrings(coll []string, item string) bool { - for _, a := range coll { - if a == item { - return true - } - } - return false -} - -// ContainsStringsCI searches a slice of strings for a case-insensitive match -func ContainsStringsCI(coll []string, item string) bool { - for _, a := range coll { - if strings.EqualFold(a, item) { - return true - } - } - return false -} - -type zeroable interface { - IsZero() bool -} - -// IsZero returns true when the value passed into the function is a zero value. -// This allows for safer checking of interface values. 
-func IsZero(data interface{}) bool { - v := reflect.ValueOf(data) - // check for nil data - switch v.Kind() { //nolint:exhaustive - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - if v.IsNil() { - return true - } - } - - // check for things that have an IsZero method instead - if vv, ok := data.(zeroable); ok { - return vv.IsZero() - } - - // continue with slightly more complex reflection - switch v.Kind() { //nolint:exhaustive - case reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Struct, reflect.Array: - return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) - case reflect.Invalid: - return true - default: - return false - } -} - -// CommandLineOptionsGroup represents a group of user-defined command line options -type CommandLineOptionsGroup struct { - ShortDescription string - LongDescription string - Options interface{} -} diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go deleted file mode 100644 index f59e025932..0000000000 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package swag - -import ( - "encoding/json" - "errors" - "fmt" - "path/filepath" - "reflect" - "sort" - "strconv" - - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" - yaml "gopkg.in/yaml.v3" -) - -// YAMLMatcher matches yaml -func YAMLMatcher(path string) bool { - ext := filepath.Ext(path) - return ext == ".yaml" || ext == ".yml" -} - -// YAMLToJSON converts YAML unmarshaled data into json compatible data -func YAMLToJSON(data interface{}) (json.RawMessage, error) { - jm, err := transformData(data) - if err != nil { - return nil, err - } - b, err := WriteJSON(jm) - return json.RawMessage(b), err -} - -// BytesToYAMLDoc converts a byte slice into a YAML document -func BytesToYAMLDoc(data []byte) (interface{}, error) { - var document yaml.Node // preserve order that is present in the document - if err := yaml.Unmarshal(data, &document); err != nil { - return nil, err - } - if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { - return nil, errors.New("only YAML documents that are objects are supported") - } - return &document, nil -} - -func yamlNode(root *yaml.Node) (interface{}, error) { - switch root.Kind { - case yaml.DocumentNode: - return yamlDocument(root) - case yaml.SequenceNode: - return yamlSequence(root) - case yaml.MappingNode: - return yamlMapping(root) - case yaml.ScalarNode: - return yamlScalar(root) - case yaml.AliasNode: - return yamlNode(root.Alias) - default: - return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind) - } -} - -func yamlDocument(node *yaml.Node) (interface{}, error) { - if len(node.Content) != 1 { - return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content)) - } - return yamlNode(node.Content[0]) -} - -func yamlMapping(node *yaml.Node) (interface{}, error) { - m := make(JSONMapSlice, len(node.Content)/2) - - var j int - for i := 0; i < len(node.Content); i += 2 { - var nmi JSONMapItem - k, err := yamlStringScalarC(node.Content[i]) - if err != nil { - return nil, fmt.Errorf("unable to decode YAML map key: %w", err) - } - nmi.Key = k - v, err := yamlNode(node.Content[i+1]) - if err != nil { - return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err) - } - nmi.Value = v - m[j] = nmi - j++ - } - return m, nil -} - -func yamlSequence(node *yaml.Node) (interface{}, error) { - s := make([]interface{}, 0) - - for i := 0; i < len(node.Content); i++ { - - v, err := yamlNode(node.Content[i]) - if err != nil { - return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err) - } - s = append(s, v) - } - return s, nil -} - -const ( // See https://yaml.org/type/ - yamlStringScalar = "tag:yaml.org,2002:str" - yamlIntScalar = "tag:yaml.org,2002:int" - yamlBoolScalar = "tag:yaml.org,2002:bool" - yamlFloatScalar = "tag:yaml.org,2002:float" - yamlTimestamp = "tag:yaml.org,2002:timestamp" - yamlNull = "tag:yaml.org,2002:null" -) - -func yamlScalar(node *yaml.Node) (interface{}, error) { - switch node.LongTag() { - case yamlStringScalar: - return node.Value, nil - case yamlBoolScalar: - b, err := strconv.ParseBool(node.Value) - if err != nil { - return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err) - } - return b, nil - case yamlIntScalar: - i, err := strconv.ParseInt(node.Value, 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to process scalar node. Got %q. 
Expecting integer content: %w", node.Value, err) - } - return i, nil - case yamlFloatScalar: - f, err := strconv.ParseFloat(node.Value, 64) - if err != nil { - return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err) - } - return f, nil - case yamlTimestamp: - return node.Value, nil - case yamlNull: - return nil, nil //nolint:nilnil - default: - return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) - } -} - -func yamlStringScalarC(node *yaml.Node) (string, error) { - if node.Kind != yaml.ScalarNode { - return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind) - } - switch node.LongTag() { - case yamlStringScalar, yamlIntScalar, yamlFloatScalar: - return node.Value, nil - default: - return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag()) - } -} - -// JSONMapSlice represent a JSON object, with the order of keys maintained -type JSONMapSlice []JSONMapItem - -// MarshalJSON renders a JSONMapSlice as JSON -func (s JSONMapSlice) MarshalJSON() ([]byte, error) { - w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} - s.MarshalEasyJSON(w) - return w.BuildBytes() -} - -// MarshalEasyJSON renders a JSONMapSlice as JSON, using easyJSON -func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) { - w.RawByte('{') - - ln := len(s) - last := ln - 1 - for i := 0; i < ln; i++ { - s[i].MarshalEasyJSON(w) - if i != last { // last item - w.RawByte(',') - } - } - - w.RawByte('}') -} - -// UnmarshalJSON makes a JSONMapSlice from JSON -func (s *JSONMapSlice) UnmarshalJSON(data []byte) error { - l := jlexer.Lexer{Data: data} - s.UnmarshalEasyJSON(&l) - return l.Error() -} - -// UnmarshalEasyJSON makes a JSONMapSlice from JSON, using easyJSON -func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { - if in.IsNull() { - in.Skip() - return - } - - var result JSONMapSlice - in.Delim('{') - for !in.IsDelim('}') { - var mi JSONMapItem - mi.UnmarshalEasyJSON(in) - result = append(result, mi) - } - *s = result -} - -func (s JSONMapSlice) MarshalYAML() (interface{}, error) { - var n yaml.Node - n.Kind = yaml.DocumentNode - var nodes []*yaml.Node - for _, item := range s { - nn, err := json2yaml(item.Value) - if err != nil { - return nil, err - } - ns := []*yaml.Node{ - { - Kind: yaml.ScalarNode, - Tag: yamlStringScalar, - Value: item.Key, - }, - nn, - } - nodes = append(nodes, ns...) 
- } - - n.Content = []*yaml.Node{ - { - Kind: yaml.MappingNode, - Content: nodes, - }, - } - - return yaml.Marshal(&n) -} - -func isNil(input interface{}) bool { - if input == nil { - return true - } - kind := reflect.TypeOf(input).Kind() - switch kind { //nolint:exhaustive - case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: - return reflect.ValueOf(input).IsNil() - default: - return false - } -} - -func json2yaml(item interface{}) (*yaml.Node, error) { - if isNil(item) { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Value: "null", - }, nil - } - - switch val := item.(type) { - case JSONMapSlice: - var n yaml.Node - n.Kind = yaml.MappingNode - for i := range val { - childNode, err := json2yaml(&val[i].Value) - if err != nil { - return nil, err - } - n.Content = append(n.Content, &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlStringScalar, - Value: val[i].Key, - }, childNode) - } - return &n, nil - case map[string]interface{}: - var n yaml.Node - n.Kind = yaml.MappingNode - keys := make([]string, 0, len(val)) - for k := range val { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - v := val[k] - childNode, err := json2yaml(v) - if err != nil { - return nil, err - } - n.Content = append(n.Content, &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlStringScalar, - Value: k, - }, childNode) - } - return &n, nil - case []interface{}: - var n yaml.Node - n.Kind = yaml.SequenceNode - for i := range val { - childNode, err := json2yaml(val[i]) - if err != nil { - return nil, err - } - n.Content = append(n.Content, childNode) - } - return &n, nil - case string: - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlStringScalar, - Value: val, - }, nil - case float64: - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlFloatScalar, - Value: strconv.FormatFloat(val, 'f', -1, 64), - }, nil - case int64: - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlIntScalar, - Value: strconv.FormatInt(val, 10), - }, nil - case uint64: - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlIntScalar, - Value: strconv.FormatUint(val, 10), - }, nil - case bool: - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlBoolScalar, - Value: strconv.FormatBool(val), - }, nil - default: - return nil, fmt.Errorf("unhandled type: %T", val) - } -} - -// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice -type JSONMapItem struct { - Key string - Value interface{} -} - -// MarshalJSON renders a JSONMapItem as JSON -func (s JSONMapItem) MarshalJSON() ([]byte, error) { - w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} - s.MarshalEasyJSON(w) - return w.BuildBytes() -} - -// MarshalEasyJSON renders a JSONMapItem as JSON, using easyJSON -func (s JSONMapItem) MarshalEasyJSON(w *jwriter.Writer) { - w.String(s.Key) - w.RawByte(':') - w.Raw(WriteJSON(s.Value)) -} - -// UnmarshalJSON makes a JSONMapItem from JSON -func (s *JSONMapItem) UnmarshalJSON(data []byte) error { - l := jlexer.Lexer{Data: data} - s.UnmarshalEasyJSON(&l) - return l.Error() -} - -// UnmarshalEasyJSON makes a JSONMapItem from JSON, using easyJSON -func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) { - key := in.UnsafeString() - in.WantColon() - value := in.Interface() - in.WantComma() - s.Key = key - s.Value = value -} - -func transformData(input interface{}) (out interface{}, err error) { - format := func(t interface{}) (string, error) { - switch k := t.(type) { - case string: - return k, nil - case uint: - return strconv.FormatUint(uint64(k), 
10), nil - case uint8: - return strconv.FormatUint(uint64(k), 10), nil - case uint16: - return strconv.FormatUint(uint64(k), 10), nil - case uint32: - return strconv.FormatUint(uint64(k), 10), nil - case uint64: - return strconv.FormatUint(k, 10), nil - case int: - return strconv.Itoa(k), nil - case int8: - return strconv.FormatInt(int64(k), 10), nil - case int16: - return strconv.FormatInt(int64(k), 10), nil - case int32: - return strconv.FormatInt(int64(k), 10), nil - case int64: - return strconv.FormatInt(k, 10), nil - default: - return "", fmt.Errorf("unexpected map key type, got: %T", k) - } - } - - switch in := input.(type) { - case yaml.Node: - return yamlNode(&in) - case *yaml.Node: - return yamlNode(in) - case map[interface{}]interface{}: - o := make(JSONMapSlice, 0, len(in)) - for ke, va := range in { - var nmi JSONMapItem - if nmi.Key, err = format(ke); err != nil { - return nil, err - } - - v, ert := transformData(va) - if ert != nil { - return nil, ert - } - nmi.Value = v - o = append(o, nmi) - } - return o, nil - case []interface{}: - len1 := len(in) - o := make([]interface{}, len1) - for i := 0; i < len1; i++ { - o[i], err = transformData(in[i]) - if err != nil { - return nil, err - } - } - return o, nil - } - return input, nil -} - -// YAMLDoc loads a yaml document from either http or a file and converts it to json -func YAMLDoc(path string) (json.RawMessage, error) { - yamlDoc, err := YAMLData(path) - if err != nil { - return nil, err - } - - data, err := YAMLToJSON(yamlDoc) - if err != nil { - return nil, err - } - - return data, nil -} - -// YAMLData loads a yaml document from either http or a file -func YAMLData(path string) (interface{}, error) { - data, err := LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - - return BytesToYAMLDoc(data) -} diff --git a/vendor/github.com/go-openapi/swag/yamlutils/LICENSE b/vendor/github.com/go-openapi/swag/yamlutils/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/yamlutils/doc.go b/vendor/github.com/go-openapi/swag/yamlutils/doc.go new file mode 100644 index 0000000000..4aeadc2248 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/doc.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yamlutils provides utilities to work with YAML documents. 
+// +// - [BytesToYAMLDoc] to construct a [yaml.Node] document +// - [YAMLToJSON] to convert a [yaml.Node] document to JSON bytes +// - [YAMLMapSlice] to serialize and deserialize YAML with the order of keys maintained +package yamlutils + +import ( + _ "go.yaml.in/yaml/v3" // for documentation purpose only +) diff --git a/vendor/github.com/go-openapi/swag/yamlutils/errors.go b/vendor/github.com/go-openapi/swag/yamlutils/errors.go new file mode 100644 index 0000000000..014f227d96 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/errors.go @@ -0,0 +1,26 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yamlutils + +type yamlError string + +const ( + // ErrYAML is an error raised by YAML utilities + ErrYAML yamlError = "yaml error" +) + +func (e yamlError) Error() string { + return string(e) +} diff --git a/vendor/github.com/go-openapi/swag/yamlutils/ordered_map.go b/vendor/github.com/go-openapi/swag/yamlutils/ordered_map.go new file mode 100644 index 0000000000..af1d7bb518 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/ordered_map.go @@ -0,0 +1,313 @@ +package yamlutils + +import ( + "fmt" + "iter" + "slices" + "sort" + "strconv" + + "github.com/go-openapi/swag/conv" + "github.com/go-openapi/swag/jsonutils" + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" + "github.com/go-openapi/swag/typeutils" + yaml "go.yaml.in/yaml/v3" +) + +var ( + _ yaml.Marshaler = YAMLMapSlice{} + _ yaml.Unmarshaler = &YAMLMapSlice{} +) + +// YAMLMapSlice represents a YAML object, with the order of keys maintained. +// +// It is similar to [jsonutils.JSONMapSlice] and also knows how to marshal and unmarshal YAML. +// +// It behaves like an ordered map, but keys can't be accessed in constant time. +type YAMLMapSlice []YAMLMapItem + +// YAMLMapItem represents the value of a key in a YAML object held by [YAMLMapSlice]. +// +// It is entirely equivalent to [jsonutils.JSONMapItem], with the same limitation that +// you should not Marshal or Unmarshal directly this type, outside of a [YAMLMapSlice]. +type YAMLMapItem = jsonutils.JSONMapItem + +func (s YAMLMapSlice) OrderedItems() iter.Seq2[string, any] { + return func(yield func(string, any) bool) { + for _, item := range s { + if !yield(item.Key, item.Value) { + return + } + } + } +} + +// SetOrderedItems implements [ifaces.SetOrdered]: it merges keys passed by the iterator argument +// into the [YAMLMapSlice]. 
+func (s *YAMLMapSlice) SetOrderedItems(items iter.Seq2[string, any]) { + if items == nil { + // force receiver to be a nil slice + *s = nil + + return + } + + m := *s + if len(m) > 0 { + // update mode: short-circuited when unmarshaling fresh data structures + idx := make(map[string]int, len(m)) + + for i, item := range m { + idx[item.Key] = i + } + + for k, v := range items { + idx, ok := idx[k] + if ok { + m[idx].Value = v + + continue + } + + m = append(m, YAMLMapItem{Key: k, Value: v}) + } + + *s = m + + return + } + + for k, v := range items { + m = append(m, YAMLMapItem{Key: k, Value: v}) + } + + *s = m +} + +// MarshalJSON renders this YAML object as JSON bytes. +// +// The difference with standard JSON marshaling is that the order of keys is maintained. +func (s YAMLMapSlice) MarshalJSON() ([]byte, error) { + return jsonutils.JSONMapSlice(s).MarshalJSON() +} + +// UnmarshalJSON builds this YAML object from JSON bytes. +// +// The difference with standard JSON marshaling is that the order of keys is maintained. +func (s *YAMLMapSlice) UnmarshalJSON(data []byte) error { + js := jsonutils.JSONMapSlice(*s) + + if err := js.UnmarshalJSON(data); err != nil { + return err + } + + *s = YAMLMapSlice(js) + + return nil +} + +// MarshalYAML produces a YAML document as bytes +// +// The difference with standard YAML marshaling is that the order of keys is maintained. +// +// It implements [yaml.Marshaler]. +func (s YAMLMapSlice) MarshalYAML() (any, error) { + if typeutils.IsNil(s) { + return []byte("null\n"), nil + } + var n yaml.Node + n.Kind = yaml.DocumentNode + var nodes []*yaml.Node + + for _, item := range s { + nn, err := json2yaml(item.Value) + if err != nil { + return nil, err + } + + ns := []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: item.Key, + }, + nn, + } + nodes = append(nodes, ns...) + } + + n.Content = []*yaml.Node{ + { + Kind: yaml.MappingNode, + Content: nodes, + }, + } + + return yaml.Marshal(&n) +} + +// UnmarshalYAML builds a YAMLMapSlice object from a YAML document [yaml.Node]. +// +// It implements [yaml.Unmarshaler]. 
+func (s *YAMLMapSlice) UnmarshalYAML(node *yaml.Node) error { + if typeutils.IsNil(*s) { + // allow to unmarshal with a simple var declaration (nil slice) + *s = YAMLMapSlice{} + } + if node == nil { + *s = nil + return nil + } + + const sensibleAllocDivider = 2 + m := slices.Grow(*s, len(node.Content)/sensibleAllocDivider) + m = m[:0] + + for i := 0; i < len(node.Content); i += 2 { + var nmi YAMLMapItem + k, err := yamlStringScalarC(node.Content[i]) + if err != nil { + return fmt.Errorf("unable to decode YAML map key: %w: %w", err, ErrYAML) + } + nmi.Key = k + v, err := yamlNode(node.Content[i+1]) + if err != nil { + return fmt.Errorf("unable to process YAML map value for key %q: %w: %w", k, err, ErrYAML) + } + nmi.Value = v + m = append(m, nmi) + } + + *s = m + + return nil +} + +func json2yaml(item any) (*yaml.Node, error) { + if typeutils.IsNil(item) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Value: "null", + }, nil + } + + switch val := item.(type) { + case ifaces.Ordered: + return orderedYAML(val) + + case map[string]any: + var n yaml.Node + n.Kind = yaml.MappingNode + keys := make([]string, 0, len(val)) + for k := range val { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := val[k] + childNode, err := json2yaml(v) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: k, + }, childNode) + } + return &n, nil + + case []any: + var n yaml.Node + n.Kind = yaml.SequenceNode + for i := range val { + childNode, err := json2yaml(val[i]) + if err != nil { + return nil, err + } + n.Content = append(n.Content, childNode) + } + return &n, nil + case string: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val, + }, nil + case float32: + return floatNode(val) + case float64: + return floatNode(val) + case int: + return integerNode(val) + case int8: + return integerNode(val) + case int16: + return integerNode(val) + case int32: + return integerNode(val) + case int64: + return integerNode(val) + case uint: + return uintegerNode(val) + case uint8: + return uintegerNode(val) + case uint16: + return uintegerNode(val) + case uint32: + return uintegerNode(val) + case uint64: + return uintegerNode(val) + case bool: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlBoolScalar, + Value: strconv.FormatBool(val), + }, nil + default: + return nil, fmt.Errorf("unhandled type: %T: %w", val, ErrYAML) + } +} + +func floatNode[T conv.Float](val T) (*yaml.Node, error) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlFloatScalar, + Value: conv.FormatFloat(val), + }, nil +} + +func integerNode[T conv.Signed](val T) (*yaml.Node, error) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: conv.FormatInteger(val), + }, nil +} + +func uintegerNode[T conv.Unsigned](val T) (*yaml.Node, error) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: conv.FormatUinteger(val), + }, nil +} + +func orderedYAML[T ifaces.Ordered](val T) (*yaml.Node, error) { + var n yaml.Node + n.Kind = yaml.MappingNode + for key, value := range val.OrderedItems() { + childNode, err := json2yaml(value) + if err != nil { + return nil, err + } + + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: key, + }, childNode) + } + return &n, nil +} diff --git a/vendor/github.com/go-openapi/swag/yamlutils/yaml.go b/vendor/github.com/go-openapi/swag/yamlutils/yaml.go new file mode 
100644 index 0000000000..67fba8fd7c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/yaml.go @@ -0,0 +1,222 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yamlutils + +import ( + json "encoding/json" + "fmt" + "strconv" + + "github.com/go-openapi/swag/jsonutils" + yaml "go.yaml.in/yaml/v3" +) + +// YAMLToJSON converts a YAML document into JSON bytes. +// +// Note: a YAML document is the output from a [yaml.Marshaler], e.g a pointer to a [yaml.Node]. +// +// [YAMLToJSON] is typically called after [BytesToYAMLDoc]. +func YAMLToJSON(value any) (json.RawMessage, error) { + jm, err := transformData(value) + if err != nil { + return nil, err + } + + b, err := jsonutils.WriteJSON(jm) + + return json.RawMessage(b), err +} + +// BytesToYAMLDoc converts a byte slice into a YAML document. +// +// This function only supports root documents that are objects. +// +// A YAML document is a pointer to a [yaml.Node]. +func BytesToYAMLDoc(data []byte) (any, error) { + var document yaml.Node // preserve order that is present in the document + if err := yaml.Unmarshal(data, &document); err != nil { + return nil, err + } + if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { + return nil, fmt.Errorf("only YAML documents that are objects are supported: %w", ErrYAML) + } + return &document, nil +} + +func yamlNode(root *yaml.Node) (any, error) { + switch root.Kind { + case yaml.DocumentNode: + return yamlDocument(root) + case yaml.SequenceNode: + return yamlSequence(root) + case yaml.MappingNode: + return yamlMapping(root) + case yaml.ScalarNode: + return yamlScalar(root) + case yaml.AliasNode: + return yamlNode(root.Alias) + default: + return nil, fmt.Errorf("unsupported YAML node type: %v: %w", root.Kind, ErrYAML) + } +} + +func yamlDocument(node *yaml.Node) (any, error) { + if len(node.Content) != 1 { + return nil, fmt.Errorf("unexpected YAML Document node content length: %d: %w", len(node.Content), ErrYAML) + } + return yamlNode(node.Content[0]) +} + +func yamlMapping(node *yaml.Node) (any, error) { + const sensibleAllocDivider = 2 // nodes concatenate (key,value) sequences + m := make(YAMLMapSlice, len(node.Content)/sensibleAllocDivider) + + if err := m.UnmarshalYAML(node); err != nil { + return nil, err + } + + return m, nil +} + +func yamlSequence(node *yaml.Node) (any, error) { + s := make([]any, 0) + + for i := range len(node.Content) { + v, err := yamlNode(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML sequence value: %w: %w", err, ErrYAML) + } + s = append(s, v) + } + return s, nil +} + +const ( // See https://yaml.org/type/ + yamlStringScalar = "tag:yaml.org,2002:str" + yamlIntScalar = "tag:yaml.org,2002:int" + yamlBoolScalar = "tag:yaml.org,2002:bool" + yamlFloatScalar = "tag:yaml.org,2002:float" + yamlTimestamp = "tag:yaml.org,2002:timestamp" + yamlNull = "tag:yaml.org,2002:null" +) + +func yamlScalar(node *yaml.Node) 
(any, error) { + switch node.LongTag() { + case yamlStringScalar: + return node.Value, nil + case yamlBoolScalar: + b, err := strconv.ParseBool(node.Value) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w: %w", node.Value, err, ErrYAML) + } + return b, nil + case yamlIntScalar: + i, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w: %w", node.Value, err, ErrYAML) + } + return i, nil + case yamlFloatScalar: + f, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w: %w", node.Value, err, ErrYAML) + } + return f, nil + case yamlTimestamp: + // YAML timestamp is marshaled as string, not time + return node.Value, nil + case yamlNull: + return nil, nil //nolint:nilnil + default: + return nil, fmt.Errorf("YAML tag %q is not supported: %w", node.LongTag(), ErrYAML) + } +} + +func yamlStringScalarC(node *yaml.Node) (string, error) { + if node.Kind != yaml.ScalarNode { + return "", fmt.Errorf("expecting a string scalar but got %q: %w", node.Kind, ErrYAML) + } + switch node.LongTag() { + case yamlStringScalar, yamlIntScalar, yamlFloatScalar: + return node.Value, nil + default: + return "", fmt.Errorf("YAML tag %q is not supported as map key: %w", node.LongTag(), ErrYAML) + } +} + +func format(t any) (string, error) { + switch k := t.(type) { + case string: + return k, nil + case uint: + return strconv.FormatUint(uint64(k), 10), nil + case uint8: + return strconv.FormatUint(uint64(k), 10), nil + case uint16: + return strconv.FormatUint(uint64(k), 10), nil + case uint32: + return strconv.FormatUint(uint64(k), 10), nil + case uint64: + return strconv.FormatUint(k, 10), nil + case int: + return strconv.Itoa(k), nil + case int8: + return strconv.FormatInt(int64(k), 10), nil + case int16: + return strconv.FormatInt(int64(k), 10), nil + case int32: + return strconv.FormatInt(int64(k), 10), nil + case int64: + return strconv.FormatInt(k, 10), nil + default: + return "", fmt.Errorf("unexpected map key type, got: %T: %w", k, ErrYAML) + } +} + +func transformData(input any) (out any, err error) { + switch in := input.(type) { + case yaml.Node: + return yamlNode(&in) + case *yaml.Node: + return yamlNode(in) + case map[any]any: + o := make(YAMLMapSlice, 0, len(in)) + for ke, va := range in { + var nmi YAMLMapItem + if nmi.Key, err = format(ke); err != nil { + return nil, err + } + + v, ert := transformData(va) + if ert != nil { + return nil, ert + } + nmi.Value = v + o = append(o, nmi) + } + return o, nil + case []any: + len1 := len(in) + o := make([]any, len1) + for i := range len1 { + o[i], err = transformData(in[i]) + if err != nil { + return nil, err + } + } + return o, nil + } + return input, nil +} diff --git a/vendor/github.com/go-openapi/swag/yamlutils_iface.go b/vendor/github.com/go-openapi/swag/yamlutils_iface.go new file mode 100644 index 0000000000..49e646486e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils_iface.go @@ -0,0 +1,31 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "encoding/json" + + "github.com/go-openapi/swag/yamlutils" +) + +// YAMLToJSON converts YAML unmarshaled data into json compatible data +// +// Deprecated: use [yamlutils.YAMLToJSON] instead. +func YAMLToJSON(data interface{}) (json.RawMessage, error) { return yamlutils.YAMLToJSON(data) } + +// BytesToYAMLDoc converts a byte slice into a YAML document +// +// Deprecated: use [yamlutils.BytesToYAMLDoc] instead. +func BytesToYAMLDoc(data []byte) (interface{}, error) { return yamlutils.BytesToYAMLDoc(data) } diff --git a/vendor/github.com/josharian/intern/README.md b/vendor/github.com/josharian/intern/README.md deleted file mode 100644 index ffc44b219b..0000000000 --- a/vendor/github.com/josharian/intern/README.md +++ /dev/null @@ -1,5 +0,0 @@ -Docs: https://godoc.org/github.com/josharian/intern - -See also [Go issue 5160](https://golang.org/issue/5160). - -License: MIT diff --git a/vendor/github.com/josharian/intern/intern.go b/vendor/github.com/josharian/intern/intern.go deleted file mode 100644 index 7acb1fe90a..0000000000 --- a/vendor/github.com/josharian/intern/intern.go +++ /dev/null @@ -1,44 +0,0 @@ -// Package intern interns strings. -// Interning is best effort only. -// Interned strings may be removed automatically -// at any time without notification. -// All functions may be called concurrently -// with themselves and each other. -package intern - -import "sync" - -var ( - pool sync.Pool = sync.Pool{ - New: func() interface{} { - return make(map[string]string) - }, - } -) - -// String returns s, interned. -func String(s string) string { - m := pool.Get().(map[string]string) - c, ok := m[s] - if ok { - pool.Put(m) - return c - } - m[s] = s - pool.Put(m) - return s -} - -// Bytes returns b converted to a string, interned. -func Bytes(b []byte) string { - m := pool.Get().(map[string]string) - c, ok := m[string(b)] - if ok { - pool.Put(m) - return c - } - s := string(b) - m[s] = s - pool.Put(m) - return s -} diff --git a/vendor/github.com/josharian/intern/license.md b/vendor/github.com/josharian/intern/license.md deleted file mode 100644 index 353d3055f0..0000000000 --- a/vendor/github.com/josharian/intern/license.md +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2019 Josh Bleecher Snyder - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE deleted file mode 100644 index fbff658f70..0000000000 --- a/vendor/github.com/mailru/easyjson/LICENSE +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2016 Mail.Ru Group - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go deleted file mode 100644 index 598a54af9d..0000000000 --- a/vendor/github.com/mailru/easyjson/buffer/pool.go +++ /dev/null @@ -1,278 +0,0 @@ -// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to -// reduce copying and to allow reuse of individual chunks. -package buffer - -import ( - "io" - "net" - "sync" -) - -// PoolConfig contains configuration for the allocation and reuse strategy. -type PoolConfig struct { - StartSize int // Minimum chunk size that is allocated. - PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. - MaxSize int // Maximum chunk size that will be allocated. -} - -var config = PoolConfig{ - StartSize: 128, - PooledSize: 512, - MaxSize: 32768, -} - -// Reuse pool: chunk size -> pool. -var buffers = map[int]*sync.Pool{} - -func initBuffers() { - for l := config.PooledSize; l <= config.MaxSize; l *= 2 { - buffers[l] = new(sync.Pool) - } -} - -func init() { - initBuffers() -} - -// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. -func Init(cfg PoolConfig) { - config = cfg - initBuffers() -} - -// putBuf puts a chunk to reuse pool if it can be reused. -func putBuf(buf []byte) { - size := cap(buf) - if size < config.PooledSize { - return - } - if c := buffers[size]; c != nil { - c.Put(buf[:0]) - } -} - -// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. -func getBuf(size int) []byte { - if size >= config.PooledSize { - if c := buffers[size]; c != nil { - v := c.Get() - if v != nil { - return v.([]byte) - } - } - } - return make([]byte, 0, size) -} - -// Buffer is a buffer optimized for serialization without extra copying. -type Buffer struct { - - // Buf is the current chunk that can be used for serialization. 
- Buf []byte - - toPool []byte - bufs [][]byte -} - -// EnsureSpace makes sure that the current chunk contains at least s free bytes, -// possibly creating a new chunk. -func (b *Buffer) EnsureSpace(s int) { - if cap(b.Buf)-len(b.Buf) < s { - b.ensureSpaceSlow(s) - } -} - -func (b *Buffer) ensureSpaceSlow(s int) { - l := len(b.Buf) - if l > 0 { - if cap(b.toPool) != cap(b.Buf) { - // Chunk was reallocated, toPool can be pooled. - putBuf(b.toPool) - } - if cap(b.bufs) == 0 { - b.bufs = make([][]byte, 0, 8) - } - b.bufs = append(b.bufs, b.Buf) - l = cap(b.toPool) * 2 - } else { - l = config.StartSize - } - - if l > config.MaxSize { - l = config.MaxSize - } - b.Buf = getBuf(l) - b.toPool = b.Buf -} - -// AppendByte appends a single byte to buffer. -func (b *Buffer) AppendByte(data byte) { - b.EnsureSpace(1) - b.Buf = append(b.Buf, data) -} - -// AppendBytes appends a byte slice to buffer. -func (b *Buffer) AppendBytes(data []byte) { - if len(data) <= cap(b.Buf)-len(b.Buf) { - b.Buf = append(b.Buf, data...) // fast path - } else { - b.appendBytesSlow(data) - } -} - -func (b *Buffer) appendBytesSlow(data []byte) { - for len(data) > 0 { - b.EnsureSpace(1) - - sz := cap(b.Buf) - len(b.Buf) - if sz > len(data) { - sz = len(data) - } - - b.Buf = append(b.Buf, data[:sz]...) - data = data[sz:] - } -} - -// AppendString appends a string to buffer. -func (b *Buffer) AppendString(data string) { - if len(data) <= cap(b.Buf)-len(b.Buf) { - b.Buf = append(b.Buf, data...) // fast path - } else { - b.appendStringSlow(data) - } -} - -func (b *Buffer) appendStringSlow(data string) { - for len(data) > 0 { - b.EnsureSpace(1) - - sz := cap(b.Buf) - len(b.Buf) - if sz > len(data) { - sz = len(data) - } - - b.Buf = append(b.Buf, data[:sz]...) - data = data[sz:] - } -} - -// Size computes the size of a buffer by adding sizes of every chunk. -func (b *Buffer) Size() int { - size := len(b.Buf) - for _, buf := range b.bufs { - size += len(buf) - } - return size -} - -// DumpTo outputs the contents of a buffer to a writer and resets the buffer. -func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { - bufs := net.Buffers(b.bufs) - if len(b.Buf) > 0 { - bufs = append(bufs, b.Buf) - } - n, err := bufs.WriteTo(w) - - for _, buf := range b.bufs { - putBuf(buf) - } - putBuf(b.toPool) - - b.bufs = nil - b.Buf = nil - b.toPool = nil - - return int(n), err -} - -// BuildBytes creates a single byte slice with all the contents of the buffer. Data is -// copied if it does not fit in a single chunk. You can optionally provide one byte -// slice as argument that it will try to reuse. -func (b *Buffer) BuildBytes(reuse ...[]byte) []byte { - if len(b.bufs) == 0 { - ret := b.Buf - b.toPool = nil - b.Buf = nil - return ret - } - - var ret []byte - size := b.Size() - - // If we got a buffer as argument and it is big enough, reuse it. - if len(reuse) == 1 && cap(reuse[0]) >= size { - ret = reuse[0][:0] - } else { - ret = make([]byte, 0, size) - } - for _, buf := range b.bufs { - ret = append(ret, buf...) - putBuf(buf) - } - - ret = append(ret, b.Buf...) - putBuf(b.toPool) - - b.bufs = nil - b.toPool = nil - b.Buf = nil - - return ret -} - -type readCloser struct { - offset int - bufs [][]byte -} - -func (r *readCloser) Read(p []byte) (n int, err error) { - for _, buf := range r.bufs { - // Copy as much as we can. - x := copy(p[n:], buf[r.offset:]) - n += x // Increment how much we filled. - - // Did we empty the whole buffer? - if r.offset+x == len(buf) { - // On to the next buffer. 
- r.offset = 0 - r.bufs = r.bufs[1:] - - // We can release this buffer. - putBuf(buf) - } else { - r.offset += x - } - - if n == len(p) { - break - } - } - // No buffers left or nothing read? - if len(r.bufs) == 0 { - err = io.EOF - } - return -} - -func (r *readCloser) Close() error { - // Release all remaining buffers. - for _, buf := range r.bufs { - putBuf(buf) - } - // In case Close gets called multiple times. - r.bufs = nil - - return nil -} - -// ReadCloser creates an io.ReadCloser with all the contents of the buffer. -func (b *Buffer) ReadCloser() io.ReadCloser { - ret := &readCloser{0, append(b.bufs, b.Buf)} - - b.bufs = nil - b.toPool = nil - b.Buf = nil - - return ret -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go deleted file mode 100644 index ff7b27c5b2..0000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go +++ /dev/null @@ -1,24 +0,0 @@ -// This file will only be included to the build if neither -// easyjson_nounsafe nor appengine build tag is set. See README notes -// for more details. - -//+build !easyjson_nounsafe -//+build !appengine - -package jlexer - -import ( - "reflect" - "unsafe" -) - -// bytesToStr creates a string pointing at the slice to avoid copying. -// -// Warning: the string returned by the function should be used with care, as the whole input data -// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data -// may be garbage-collected even when the string exists. -func bytesToStr(data []byte) string { - h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) - shdr := reflect.StringHeader{Data: h.Data, Len: h.Len} - return *(*string)(unsafe.Pointer(&shdr)) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go deleted file mode 100644 index 864d1be676..0000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go +++ /dev/null @@ -1,13 +0,0 @@ -// This file is included to the build if any of the buildtags below -// are defined. Refer to README notes for more details. - -//+build easyjson_nounsafe appengine - -package jlexer - -// bytesToStr creates a string normally from []byte -// -// Note that this method is roughly 1.5x slower than using the 'unsafe' method. -func bytesToStr(data []byte) string { - return string(data) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go deleted file mode 100644 index e90ec40d05..0000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/error.go +++ /dev/null @@ -1,15 +0,0 @@ -package jlexer - -import "fmt" - -// LexerError implements the error interface and represents all possible errors that can be -// generated during parsing the JSON data. -type LexerError struct { - Reason string - Offset int - Data string -} - -func (l *LexerError) Error() string { - return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go deleted file mode 100644 index b5f5e26132..0000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ /dev/null @@ -1,1244 +0,0 @@ -// Package jlexer contains a JSON lexer implementation. -// -// It is expected that it is mostly used with generated parser code, so the interface is tuned -// for a parser that knows what kind of data is expected. 
-package jlexer - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" - - "github.com/josharian/intern" -) - -// tokenKind determines type of a token. -type tokenKind byte - -const ( - tokenUndef tokenKind = iota // No token. - tokenDelim // Delimiter: one of '{', '}', '[' or ']'. - tokenString // A string literal, e.g. "abc\u1234" - tokenNumber // Number literal, e.g. 1.5e5 - tokenBool // Boolean literal: true or false. - tokenNull // null keyword. -) - -// token describes a single token: type, position in the input and value. -type token struct { - kind tokenKind // Type of a token. - - boolValue bool // Value if a boolean literal token. - byteValueCloned bool // true if byteValue was allocated and does not refer to original json body - byteValue []byte // Raw value of a token. - delimValue byte -} - -// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. -type Lexer struct { - Data []byte // Input data given to the lexer. - - start int // Start of the current token. - pos int // Current unscanned position in the input stream. - token token // Last scanned token, if token.kind != tokenUndef. - - firstElement bool // Whether current element is the first in array or an object. - wantSep byte // A comma or a colon character, which need to occur before a token. - - UseMultipleErrors bool // If we want to use multiple errors. - fatalError error // Fatal error occurred during lexing. It is usually a syntax error. - multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors. -} - -// FetchToken scans the input for the next token. -func (r *Lexer) FetchToken() { - r.token.kind = tokenUndef - r.start = r.pos - - // Check if r.Data has r.pos element - // If it doesn't, it mean corrupted input data - if len(r.Data) < r.pos { - r.errParse("Unexpected end of data") - return - } - // Determine the type of a token by skipping whitespace and reading the - // first character. 
- for _, c := range r.Data[r.pos:] { - switch c { - case ':', ',': - if r.wantSep == c { - r.pos++ - r.start++ - r.wantSep = 0 - } else { - r.errSyntax() - } - - case ' ', '\t', '\r', '\n': - r.pos++ - r.start++ - - case '"': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenString - r.fetchString() - return - - case '{', '[': - if r.wantSep != 0 { - r.errSyntax() - } - r.firstElement = true - r.token.kind = tokenDelim - r.token.delimValue = r.Data[r.pos] - r.pos++ - return - - case '}', ']': - if !r.firstElement && (r.wantSep != ',') { - r.errSyntax() - } - r.wantSep = 0 - r.token.kind = tokenDelim - r.token.delimValue = r.Data[r.pos] - r.pos++ - return - - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': - if r.wantSep != 0 { - r.errSyntax() - } - r.token.kind = tokenNumber - r.fetchNumber() - return - - case 'n': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenNull - r.fetchNull() - return - - case 't': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenBool - r.token.boolValue = true - r.fetchTrue() - return - - case 'f': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenBool - r.token.boolValue = false - r.fetchFalse() - return - - default: - r.errSyntax() - return - } - } - r.fatalError = io.EOF - return -} - -// isTokenEnd returns true if the char can follow a non-delimiter token -func isTokenEnd(c byte) bool { - return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' -} - -// fetchNull fetches and checks remaining bytes of null keyword. -func (r *Lexer) fetchNull() { - r.pos += 4 - if r.pos > len(r.Data) || - r.Data[r.pos-3] != 'u' || - r.Data[r.pos-2] != 'l' || - r.Data[r.pos-1] != 'l' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 4 - r.errSyntax() - } -} - -// fetchTrue fetches and checks remaining bytes of true keyword. -func (r *Lexer) fetchTrue() { - r.pos += 4 - if r.pos > len(r.Data) || - r.Data[r.pos-3] != 'r' || - r.Data[r.pos-2] != 'u' || - r.Data[r.pos-1] != 'e' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 4 - r.errSyntax() - } -} - -// fetchFalse fetches and checks remaining bytes of false keyword. -func (r *Lexer) fetchFalse() { - r.pos += 5 - if r.pos > len(r.Data) || - r.Data[r.pos-4] != 'a' || - r.Data[r.pos-3] != 'l' || - r.Data[r.pos-2] != 's' || - r.Data[r.pos-1] != 'e' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 5 - r.errSyntax() - } -} - -// fetchNumber scans a number literal token. -func (r *Lexer) fetchNumber() { - hasE := false - afterE := false - hasDot := false - - r.pos++ - for i, c := range r.Data[r.pos:] { - switch { - case c >= '0' && c <= '9': - afterE = false - case c == '.' && !hasDot: - hasDot = true - case (c == 'e' || c == 'E') && !hasE: - hasE = true - hasDot = true - afterE = true - case (c == '+' || c == '-') && afterE: - afterE = false - default: - r.pos += i - if !isTokenEnd(c) { - r.errSyntax() - } else { - r.token.byteValue = r.Data[r.start:r.pos] - } - return - } - } - - r.pos = len(r.Data) - r.token.byteValue = r.Data[r.start:] -} - -// findStringLen tries to scan into the string literal for ending quote char to determine required size. -// The size will be exact if no escapes are present and may be inexact if there are escaped chars. 
-func findStringLen(data []byte) (isValid bool, length int) { - for { - idx := bytes.IndexByte(data, '"') - if idx == -1 { - return false, len(data) - } - if idx == 0 || (idx > 0 && data[idx-1] != '\\') { - return true, length + idx - } - - // count \\\\\\\ sequences. even number of slashes means quote is not really escaped - cnt := 1 - for idx-cnt-1 >= 0 && data[idx-cnt-1] == '\\' { - cnt++ - } - if cnt%2 == 0 { - return true, length + idx - } - - length += idx + 1 - data = data[idx+1:] - } -} - -// unescapeStringToken performs unescaping of string token. -// if no escaping is needed, original string is returned, otherwise - a new one allocated -func (r *Lexer) unescapeStringToken() (err error) { - data := r.token.byteValue - var unescapedData []byte - - for { - i := bytes.IndexByte(data, '\\') - if i == -1 { - break - } - - escapedRune, escapedBytes, err := decodeEscape(data[i:]) - if err != nil { - r.errParse(err.Error()) - return err - } - - if unescapedData == nil { - unescapedData = make([]byte, 0, len(r.token.byteValue)) - } - - var d [4]byte - s := utf8.EncodeRune(d[:], escapedRune) - unescapedData = append(unescapedData, data[:i]...) - unescapedData = append(unescapedData, d[:s]...) - - data = data[i+escapedBytes:] - } - - if unescapedData != nil { - r.token.byteValue = append(unescapedData, data...) - r.token.byteValueCloned = true - } - return -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - var val rune - for i := 2; i < len(s) && i < 6; i++ { - var v byte - c := s[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - v = c - '0' - case 'a', 'b', 'c', 'd', 'e', 'f': - v = c - 'a' + 10 - case 'A', 'B', 'C', 'D', 'E', 'F': - v = c - 'A' + 10 - default: - return -1 - } - - val <<= 4 - val |= rune(v) - } - return val -} - -// decodeEscape processes a single escape sequence and returns number of bytes processed. -func decodeEscape(data []byte) (decoded rune, bytesProcessed int, err error) { - if len(data) < 2 { - return 0, 0, errors.New("incorrect escape symbol \\ at the end of token") - } - - c := data[1] - switch c { - case '"', '/', '\\': - return rune(c), 2, nil - case 'b': - return '\b', 2, nil - case 'f': - return '\f', 2, nil - case 'n': - return '\n', 2, nil - case 'r': - return '\r', 2, nil - case 't': - return '\t', 2, nil - case 'u': - rr := getu4(data) - if rr < 0 { - return 0, 0, errors.New("incorrectly escaped \\uXXXX sequence") - } - - read := 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(data[read:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - read += 6 - rr = dec - } else { - rr = unicode.ReplacementChar - } - } - return rr, read, nil - } - - return 0, 0, errors.New("incorrectly escaped bytes") -} - -// fetchString scans a string literal token. -func (r *Lexer) fetchString() { - r.pos++ - data := r.Data[r.pos:] - - isValid, length := findStringLen(data) - if !isValid { - r.pos += length - r.errParse("unterminated string literal") - return - } - r.token.byteValue = data[:length] - r.pos += length + 1 // skip closing '"' as well -} - -// scanToken scans the next token if no token is currently available in the lexer. -func (r *Lexer) scanToken() { - if r.token.kind != tokenUndef || r.fatalError != nil { - return - } - - r.FetchToken() -} - -// consume resets the current token to allow scanning the next one. 
-func (r *Lexer) consume() { - r.token.kind = tokenUndef - r.token.byteValueCloned = false - r.token.delimValue = 0 -} - -// Ok returns true if no error (including io.EOF) was encountered during scanning. -func (r *Lexer) Ok() bool { - return r.fatalError == nil -} - -const maxErrorContextLen = 13 - -func (r *Lexer) errParse(what string) { - if r.fatalError == nil { - var str string - if len(r.Data)-r.pos <= maxErrorContextLen { - str = string(r.Data) - } else { - str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." - } - r.fatalError = &LexerError{ - Reason: what, - Offset: r.pos, - Data: str, - } - } -} - -func (r *Lexer) errSyntax() { - r.errParse("syntax error") -} - -func (r *Lexer) errInvalidToken(expected string) { - if r.fatalError != nil { - return - } - if r.UseMultipleErrors { - r.pos = r.start - r.consume() - r.SkipRecursive() - switch expected { - case "[": - r.token.delimValue = ']' - r.token.kind = tokenDelim - case "{": - r.token.delimValue = '}' - r.token.kind = tokenDelim - } - r.addNonfatalError(&LexerError{ - Reason: fmt.Sprintf("expected %s", expected), - Offset: r.start, - Data: string(r.Data[r.start:r.pos]), - }) - return - } - - var str string - if len(r.token.byteValue) <= maxErrorContextLen { - str = string(r.token.byteValue) - } else { - str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." - } - r.fatalError = &LexerError{ - Reason: fmt.Sprintf("expected %s", expected), - Offset: r.pos, - Data: str, - } -} - -func (r *Lexer) GetPos() int { - return r.pos -} - -// Delim consumes a token and verifies that it is the given delimiter. -func (r *Lexer) Delim(c byte) { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - - if !r.Ok() || r.token.delimValue != c { - r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled. - r.errInvalidToken(string([]byte{c})) - } else { - r.consume() - } -} - -// IsDelim returns true if there was no scanning error and next token is the given delimiter. -func (r *Lexer) IsDelim(c byte) bool { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - return !r.Ok() || r.token.delimValue == c -} - -// Null verifies that the next token is null and consumes it. -func (r *Lexer) Null() { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenNull { - r.errInvalidToken("null") - } - r.consume() -} - -// IsNull returns true if the next token is a null keyword. -func (r *Lexer) IsNull() bool { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - return r.Ok() && r.token.kind == tokenNull -} - -// Skip skips a single token. -func (r *Lexer) Skip() { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - r.consume() -} - -// SkipRecursive skips next array or object completely, or just skips a single token if not -// an array/object. -// -// Note: no syntax validation is performed on the skipped data. 
-func (r *Lexer) SkipRecursive() { - r.scanToken() - var start, end byte - startPos := r.start - - switch r.token.delimValue { - case '{': - start, end = '{', '}' - case '[': - start, end = '[', ']' - default: - r.consume() - return - } - - r.consume() - - level := 1 - inQuotes := false - wasEscape := false - - for i, c := range r.Data[r.pos:] { - switch { - case c == start && !inQuotes: - level++ - case c == end && !inQuotes: - level-- - if level == 0 { - r.pos += i + 1 - if !json.Valid(r.Data[startPos:r.pos]) { - r.pos = len(r.Data) - r.fatalError = &LexerError{ - Reason: "skipped array/object json value is invalid", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - } - } - return - } - case c == '\\' && inQuotes: - wasEscape = !wasEscape - continue - case c == '"' && inQuotes: - inQuotes = wasEscape - case c == '"': - inQuotes = true - } - wasEscape = false - } - r.pos = len(r.Data) - r.fatalError = &LexerError{ - Reason: "EOF reached while skipping array/object or token", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - } -} - -// Raw fetches the next item recursively as a data slice -func (r *Lexer) Raw() []byte { - r.SkipRecursive() - if !r.Ok() { - return nil - } - return r.Data[r.start:r.pos] -} - -// IsStart returns whether the lexer is positioned at the start -// of an input string. -func (r *Lexer) IsStart() bool { - return r.pos == 0 -} - -// Consumed reads all remaining bytes from the input, publishing an error if -// there is anything but whitespace remaining. -func (r *Lexer) Consumed() { - if r.pos > len(r.Data) || !r.Ok() { - return - } - - for _, c := range r.Data[r.pos:] { - if c != ' ' && c != '\t' && c != '\r' && c != '\n' { - r.AddError(&LexerError{ - Reason: "invalid character '" + string(c) + "' after top-level value", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - }) - return - } - - r.pos++ - r.start++ - } -} - -func (r *Lexer) unsafeString(skipUnescape bool) (string, []byte) { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return "", nil - } - if !skipUnescape { - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return "", nil - } - } - - bytes := r.token.byteValue - ret := bytesToStr(r.token.byteValue) - r.consume() - return ret, bytes -} - -// UnsafeString returns the string value if the token is a string literal. -// -// Warning: returned string may point to the input buffer, so the string should not outlive -// the input buffer. Intended pattern of usage is as an argument to a switch statement. -func (r *Lexer) UnsafeString() string { - ret, _ := r.unsafeString(false) - return ret -} - -// UnsafeBytes returns the byte slice if the token is a string literal. -func (r *Lexer) UnsafeBytes() []byte { - _, ret := r.unsafeString(false) - return ret -} - -// UnsafeFieldName returns current member name string token -func (r *Lexer) UnsafeFieldName(skipUnescape bool) string { - ret, _ := r.unsafeString(skipUnescape) - return ret -} - -// String reads a string literal. 
-func (r *Lexer) String() string { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return "" - } - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return "" - } - var ret string - if r.token.byteValueCloned { - ret = bytesToStr(r.token.byteValue) - } else { - ret = string(r.token.byteValue) - } - r.consume() - return ret -} - -// StringIntern reads a string literal, and performs string interning on it. -func (r *Lexer) StringIntern() string { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return "" - } - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return "" - } - ret := intern.Bytes(r.token.byteValue) - r.consume() - return ret -} - -// Bytes reads a string literal and base64 decodes it into a byte slice. -func (r *Lexer) Bytes() []byte { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return nil - } - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return nil - } - ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) - n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) - if err != nil { - r.fatalError = &LexerError{ - Reason: err.Error(), - } - return nil - } - - r.consume() - return ret[:n] -} - -// Bool reads a true or false boolean keyword. -func (r *Lexer) Bool() bool { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenBool { - r.errInvalidToken("bool") - return false - } - ret := r.token.boolValue - r.consume() - return ret -} - -func (r *Lexer) number() string { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenNumber { - r.errInvalidToken("number") - return "" - } - ret := bytesToStr(r.token.byteValue) - r.consume() - return ret -} - -func (r *Lexer) Uint8() uint8 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint8(n) -} - -func (r *Lexer) Uint16() uint16 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint16(n) -} - -func (r *Lexer) Uint32() uint32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint32(n) -} - -func (r *Lexer) Uint64() uint64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Uint() uint { - return uint(r.Uint64()) -} - -func (r *Lexer) Int8() int8 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int8(n) -} - -func (r *Lexer) Int16() int16 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := 
strconv.ParseInt(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int16(n) -} - -func (r *Lexer) Int32() int32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int32(n) -} - -func (r *Lexer) Int64() int64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Int() int { - return int(r.Int64()) -} - -func (r *Lexer) Uint8Str() uint8 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint8(n) -} - -func (r *Lexer) Uint16Str() uint16 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint16(n) -} - -func (r *Lexer) Uint32Str() uint32 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint32(n) -} - -func (r *Lexer) Uint64Str() uint64 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) UintStr() uint { - return uint(r.Uint64Str()) -} - -func (r *Lexer) UintptrStr() uintptr { - return uintptr(r.Uint64Str()) -} - -func (r *Lexer) Int8Str() int8 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int8(n) -} - -func (r *Lexer) Int16Str() int16 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int16(n) -} - -func (r *Lexer) Int32Str() int32 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int32(n) -} - -func (r *Lexer) Int64Str() int64 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) IntStr() int { - return int(r.Int64Str()) -} - -func (r *Lexer) Float32() float32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseFloat(s, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return float32(n) -} - -func (r *Lexer) Float32Str() float32 { - s, b := r.unsafeString(false) 
- if !r.Ok() { - return 0 - } - n, err := strconv.ParseFloat(s, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return float32(n) -} - -func (r *Lexer) Float64() float64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseFloat(s, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Float64Str() float64 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - n, err := strconv.ParseFloat(s, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) Error() error { - return r.fatalError -} - -func (r *Lexer) AddError(e error) { - if r.fatalError == nil { - r.fatalError = e - } -} - -func (r *Lexer) AddNonFatalError(e error) { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Data: string(r.Data[r.start:r.pos]), - Reason: e.Error(), - }) -} - -func (r *Lexer) addNonfatalError(err *LexerError) { - if r.UseMultipleErrors { - // We don't want to add errors with the same offset. - if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset { - return - } - r.multipleErrors = append(r.multipleErrors, err) - return - } - r.fatalError = err -} - -func (r *Lexer) GetNonFatalErrors() []*LexerError { - return r.multipleErrors -} - -// JsonNumber fetches and json.Number from 'encoding/json' package. -// Both int, float or string, contains them are valid values -func (r *Lexer) JsonNumber() json.Number { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() { - r.errInvalidToken("json.Number") - return json.Number("") - } - - switch r.token.kind { - case tokenString: - return json.Number(r.String()) - case tokenNumber: - return json.Number(r.Raw()) - case tokenNull: - r.Null() - return json.Number("") - default: - r.errSyntax() - return json.Number("") - } -} - -// Interface fetches an interface{} analogous to the 'encoding/json' package. -func (r *Lexer) Interface() interface{} { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - - if !r.Ok() { - return nil - } - switch r.token.kind { - case tokenString: - return r.String() - case tokenNumber: - return r.Float64() - case tokenBool: - return r.Bool() - case tokenNull: - r.Null() - return nil - } - - if r.token.delimValue == '{' { - r.consume() - - ret := map[string]interface{}{} - for !r.IsDelim('}') { - key := r.String() - r.WantColon() - ret[key] = r.Interface() - r.WantComma() - } - r.Delim('}') - - if r.Ok() { - return ret - } else { - return nil - } - } else if r.token.delimValue == '[' { - r.consume() - - ret := []interface{}{} - for !r.IsDelim(']') { - ret = append(ret, r.Interface()) - r.WantComma() - } - r.Delim(']') - - if r.Ok() { - return ret - } else { - return nil - } - } - r.errSyntax() - return nil -} - -// WantComma requires a comma to be present before fetching next token. -func (r *Lexer) WantComma() { - r.wantSep = ',' - r.firstElement = false -} - -// WantColon requires a colon to be present before fetching next token. 
-func (r *Lexer) WantColon() { - r.wantSep = ':' - r.firstElement = false -} diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go deleted file mode 100644 index 2c5b20105b..0000000000 --- a/vendor/github.com/mailru/easyjson/jwriter/writer.go +++ /dev/null @@ -1,405 +0,0 @@ -// Package jwriter contains a JSON writer. -package jwriter - -import ( - "io" - "strconv" - "unicode/utf8" - - "github.com/mailru/easyjson/buffer" -) - -// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but -// Flags field in Writer is used to set and pass them around. -type Flags int - -const ( - NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'. - NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'. -) - -// Writer is a JSON writer. -type Writer struct { - Flags Flags - - Error error - Buffer buffer.Buffer - NoEscapeHTML bool -} - -// Size returns the size of the data that was written out. -func (w *Writer) Size() int { - return w.Buffer.Size() -} - -// DumpTo outputs the data to given io.Writer, resetting the buffer. -func (w *Writer) DumpTo(out io.Writer) (written int, err error) { - return w.Buffer.DumpTo(out) -} - -// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice -// as argument that it will try to reuse. -func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) { - if w.Error != nil { - return nil, w.Error - } - - return w.Buffer.BuildBytes(reuse...), nil -} - -// ReadCloser returns an io.ReadCloser that can be used to read the data. -// ReadCloser also resets the buffer. -func (w *Writer) ReadCloser() (io.ReadCloser, error) { - if w.Error != nil { - return nil, w.Error - } - - return w.Buffer.ReadCloser(), nil -} - -// RawByte appends raw binary data to the buffer. -func (w *Writer) RawByte(c byte) { - w.Buffer.AppendByte(c) -} - -// RawByte appends raw binary data to the buffer. -func (w *Writer) RawString(s string) { - w.Buffer.AppendString(s) -} - -// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for -// calling with results of MarshalJSON-like functions. -func (w *Writer) Raw(data []byte, err error) { - switch { - case w.Error != nil: - return - case err != nil: - w.Error = err - case len(data) > 0: - w.Buffer.AppendBytes(data) - default: - w.RawString("null") - } -} - -// RawText encloses raw binary data in quotes and appends in to the buffer. -// Useful for calling with results of MarshalText-like functions. 
-func (w *Writer) RawText(data []byte, err error) { - switch { - case w.Error != nil: - return - case err != nil: - w.Error = err - case len(data) > 0: - w.String(string(data)) - default: - w.RawString("null") - } -} - -// Base64Bytes appends data to the buffer after base64 encoding it -func (w *Writer) Base64Bytes(data []byte) { - if data == nil { - w.Buffer.AppendString("null") - return - } - w.Buffer.AppendByte('"') - w.base64(data) - w.Buffer.AppendByte('"') -} - -func (w *Writer) Uint8(n uint8) { - w.Buffer.EnsureSpace(3) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint16(n uint16) { - w.Buffer.EnsureSpace(5) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint32(n uint32) { - w.Buffer.EnsureSpace(10) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint(n uint) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint64(n uint64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) -} - -func (w *Writer) Int8(n int8) { - w.Buffer.EnsureSpace(4) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int16(n int16) { - w.Buffer.EnsureSpace(6) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int32(n int32) { - w.Buffer.EnsureSpace(11) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int(n int) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int64(n int64) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) -} - -func (w *Writer) Uint8Str(n uint8) { - w.Buffer.EnsureSpace(3) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint16Str(n uint16) { - w.Buffer.EnsureSpace(5) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint32Str(n uint32) { - w.Buffer.EnsureSpace(10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) UintStr(n uint) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint64Str(n uint64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) UintptrStr(n uintptr) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int8Str(n int8) { - w.Buffer.EnsureSpace(4) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int16Str(n int16) { - w.Buffer.EnsureSpace(6) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int32Str(n int32) 
{ - w.Buffer.EnsureSpace(11) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) IntStr(n int) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int64Str(n int64) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Float32(n float32) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) -} - -func (w *Writer) Float32Str(n float32) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Float64(n float64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) -} - -func (w *Writer) Float64Str(n float64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Bool(v bool) { - w.Buffer.EnsureSpace(5) - if v { - w.Buffer.Buf = append(w.Buffer.Buf, "true"...) - } else { - w.Buffer.Buf = append(w.Buffer.Buf, "false"...) - } -} - -const chars = "0123456789abcdef" - -func getTable(falseValues ...int) [128]bool { - table := [128]bool{} - - for i := 0; i < 128; i++ { - table[i] = true - } - - for _, v := range falseValues { - table[v] = false - } - - return table -} - -var ( - htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\') - htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\') -) - -func (w *Writer) String(s string) { - w.Buffer.AppendByte('"') - - // Portions of the string that contain no escapes are appended as - // byte slices. 
- - p := 0 // last non-escape symbol - - escapeTable := &htmlEscapeTable - if w.NoEscapeHTML { - escapeTable = &htmlNoEscapeTable - } - - for i := 0; i < len(s); { - c := s[i] - - if c < utf8.RuneSelf { - if escapeTable[c] { - // single-width character, no escaping is required - i++ - continue - } - - w.Buffer.AppendString(s[p:i]) - switch c { - case '\t': - w.Buffer.AppendString(`\t`) - case '\r': - w.Buffer.AppendString(`\r`) - case '\n': - w.Buffer.AppendString(`\n`) - case '\\': - w.Buffer.AppendString(`\\`) - case '"': - w.Buffer.AppendString(`\"`) - default: - w.Buffer.AppendString(`\u00`) - w.Buffer.AppendByte(chars[c>>4]) - w.Buffer.AppendByte(chars[c&0xf]) - } - - i++ - p = i - continue - } - - // broken utf - runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) - if runeValue == utf8.RuneError && runeWidth == 1 { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\ufffd`) - i++ - p = i - continue - } - - // jsonp stuff - tab separator and line separator - if runeValue == '\u2028' || runeValue == '\u2029' { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\u202`) - w.Buffer.AppendByte(chars[runeValue&0xf]) - i += runeWidth - p = i - continue - } - i += runeWidth - } - w.Buffer.AppendString(s[p:]) - w.Buffer.AppendByte('"') -} - -const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" -const padChar = '=' - -func (w *Writer) base64(in []byte) { - - if len(in) == 0 { - return - } - - w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4) - - si := 0 - n := (len(in) / 3) * 3 - - for si < n { - // Convert 3x 8bit source bytes into 4 bytes - val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2]) - - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F]) - - si += 3 - } - - remain := len(in) - si - if remain == 0 { - return - } - - // Add the remaining small block - val := uint(in[si+0]) << 16 - if remain == 2 { - val |= uint(in[si+1]) << 8 - } - - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F]) - - switch remain { - case 2: - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar)) - case 1: - w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar)) - } -} diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 9f6090b8d3..79c3f61995 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,18 @@ +## 1.36.1 + +### Fixes +- Fix https://github.com/onsi/gomega/issues/803 [1c6c112] +- resolves onsi/gomega#696: make HaveField great on pointer receivers given only a non-addressable value [4feb9d7] + +## 1.36.0 + +### Features +- new: make collection-related matchers Go 1.23 iterator aware [4c964c6] + +### Maintenance +- Replace min/max helpers with built-in min/max [ece6872] +- Fix some typos in docs [8e924d7] + ## 1.35.1 ### Fixes diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 1038d7dd41..c6ac499f70 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.35.1" +const GOMEGA_VERSION = "1.36.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). 
diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go index 527c1a1c10..bd7f0b96e7 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go @@ -4,17 +4,31 @@ package matchers import ( "fmt" + "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type BeEmptyMatcher struct { } func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) { + // short-circuit the iterator case, as we only need to see the first + // element, if any. + if miter.IsIter(actual) { + var length int + if miter.IsSeq2(actual) { + miter.IterateKV(actual, func(k, v reflect.Value) bool { length++; return false }) + } else { + miter.IterateV(actual, func(v reflect.Value) bool { length++; return false }) + } + return length == 0, nil + } + length, ok := lengthOf(actual) if !ok { - return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice/iterator. Got:\n%s", format.Object(actual, 1)) } return length == 0, nil diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go index f69037a4f0..a111881825 100644 --- a/vendor/github.com/onsi/gomega/matchers/consist_of.go +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -7,6 +7,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" ) @@ -17,8 +18,8 @@ type ConsistOfMatcher struct { } func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { + return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map/iter.Seq/iter.Seq2. 
Got:\n%s", format.Object(actual, 1)) } matchers := matchers(matcher.Elements) @@ -60,10 +61,21 @@ func equalMatchersToElements(matchers []interface{}) (elements []interface{}) { } func flatten(elems []interface{}) []interface{} { - if len(elems) != 1 || !isArrayOrSlice(elems[0]) { + if len(elems) != 1 || + !(isArrayOrSlice(elems[0]) || + (miter.IsIter(elems[0]) && !miter.IsSeq2(elems[0]))) { return elems } + if miter.IsIter(elems[0]) { + flattened := []any{} + miter.IterateV(elems[0], func(v reflect.Value) bool { + flattened = append(flattened, v.Interface()) + return true + }) + return flattened + } + value := reflect.ValueOf(elems[0]) flattened := make([]interface{}, value.Len()) for i := 0; i < value.Len(); i++ { @@ -116,7 +128,19 @@ func presentable(elems []interface{}) interface{} { func valuesOf(actual interface{}) []interface{} { value := reflect.ValueOf(actual) values := []interface{}{} - if isMap(actual) { + if miter.IsIter(actual) { + if miter.IsSeq2(actual) { + miter.IterateKV(actual, func(k, v reflect.Value) bool { + values = append(values, v.Interface()) + return true + }) + } else { + miter.IterateV(actual, func(v reflect.Value) bool { + values = append(values, v.Interface()) + return true + }) + } + } else if isMap(actual) { keys := value.MapKeys() for i := 0; i < value.Len(); i++ { values = append(values, value.MapIndex(keys[i]).Interface()) diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go index 3d45c9ebc6..830239c7b6 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go @@ -8,6 +8,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type ContainElementMatcher struct { @@ -16,16 +17,18 @@ type ContainElementMatcher struct { } func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { + return false, fmt.Errorf("ContainElement matcher expects an array/slice/map/iterator. Got:\n%s", format.Object(actual, 1)) } var actualT reflect.Type var result reflect.Value - switch l := len(matcher.Result); { - case l > 1: + switch numResultArgs := len(matcher.Result); { + case numResultArgs > 1: return false, errors.New("ContainElement matcher expects at most a single optional pointer to store its findings at") - case l == 1: + case numResultArgs == 1: + // Check the optional result arg to point to a single value/array/slice/map + // of a type compatible with the actual value. if reflect.ValueOf(matcher.Result[0]).Kind() != reflect.Ptr { return false, fmt.Errorf("ContainElement matcher expects a non-nil pointer to store its findings at. Got\n%s", format.Object(matcher.Result[0], 1)) @@ -34,93 +37,209 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e resultReference := matcher.Result[0] result = reflect.ValueOf(resultReference).Elem() // what ResultReference points to, to stash away our findings switch result.Kind() { - case reflect.Array: + case reflect.Array: // result arrays are not supported, as they cannot be dynamically sized. 
+ if miter.IsIter(actual) { + _, actualvT := miter.IterKVTypes(actual) + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + reflect.SliceOf(actualvT), result.Type().String()) + } return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", reflect.SliceOf(actualT.Elem()).String(), result.Type().String()) - case reflect.Slice: - if !isArrayOrSlice(actual) { + + case reflect.Slice: // result slice + // can we assign elements in actual to elements in what the result + // arg points to? + // - ✔ actual is an array or slice + // - ✔ actual is an iter.Seq producing "v" elements + // - ✔ actual is an iter.Seq2 producing "v" elements, ignoring + // the "k" elements. + switch { + case isArrayOrSlice(actual): + if !actualT.Elem().AssignableTo(result.Type().Elem()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.String(), result.Type().String()) + } + + case miter.IsIter(actual): + _, actualvT := miter.IterKVTypes(actual) + if !actualvT.AssignableTo(result.Type().Elem()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualvT.String(), result.Type().String()) + } + + default: // incompatible result reference return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", reflect.MapOf(actualT.Key(), actualT.Elem()).String(), result.Type().String()) } - if !actualT.Elem().AssignableTo(result.Type().Elem()) { - return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", - actualT.String(), result.Type().String()) - } - case reflect.Map: - if !isMap(actual) { - return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", - actualT.String(), result.Type().String()) - } - if !actualT.AssignableTo(result.Type()) { + + case reflect.Map: // result map + // can we assign elements in actual to elements in what the result + // arg points to? + // - ✔ actual is a map + // - ✔ actual is an iter.Seq2 (iter.Seq doesn't fit though) + switch { + case isMap(actual): + if !actualT.AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.String(), result.Type().String()) + } + + case miter.IsIter(actual): + actualkT, actualvT := miter.IterKVTypes(actual) + if actualkT == nil { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + reflect.SliceOf(actualvT).String(), result.Type().String()) + } + if !reflect.MapOf(actualkT, actualvT).AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + reflect.MapOf(actualkT, actualvT), result.Type().String()) + } + + default: // incompatible result reference return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", actualT.String(), result.Type().String()) } + default: - if !actualT.Elem().AssignableTo(result.Type()) { - return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", - actualT.Elem().String(), result.Type().String()) + // can we assign a (single) element in actual to what the result arg + // points to? + switch { + case miter.IsIter(actual): + _, actualvT := miter.IterKVTypes(actual) + if !actualvT.AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. 
Need *%s, got *%s", + actualvT.String(), result.Type().String()) + } + default: + if !actualT.Elem().AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.Elem().String(), result.Type().String()) + } } } } + // If the supplied matcher isn't an Omega matcher, default to the Equal + // matcher. elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher) if !elementIsMatcher { elemMatcher = &EqualMatcher{Expected: matcher.Element} } value := reflect.ValueOf(actual) - var valueAt func(int) interface{} - var getFindings func() reflect.Value - var foundAt func(int) + var getFindings func() reflect.Value // abstracts how the findings are collected and stored + var lastError error - if isMap(actual) { - keys := value.MapKeys() - valueAt = func(i int) interface{} { - return value.MapIndex(keys[i]).Interface() + if !miter.IsIter(actual) { + var valueAt func(int) interface{} + var foundAt func(int) + // We're dealing with an array/slice/map, so in all cases we can iterate + // over the elements in actual using indices (that can be considered + // keys in case of maps). + if isMap(actual) { + keys := value.MapKeys() + valueAt = func(i int) interface{} { + return value.MapIndex(keys[i]).Interface() + } + if result.Kind() != reflect.Invalid { + fm := reflect.MakeMap(actualT) + getFindings = func() reflect.Value { return fm } + foundAt = func(i int) { + fm.SetMapIndex(keys[i], value.MapIndex(keys[i])) + } + } + } else { + valueAt = func(i int) interface{} { + return value.Index(i).Interface() + } + if result.Kind() != reflect.Invalid { + var fsl reflect.Value + if result.Kind() == reflect.Slice { + fsl = reflect.MakeSlice(result.Type(), 0, 0) + } else { + fsl = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0) + } + getFindings = func() reflect.Value { return fsl } + foundAt = func(i int) { + fsl = reflect.Append(fsl, value.Index(i)) + } + } } - if result.Kind() != reflect.Invalid { - fm := reflect.MakeMap(actualT) - getFindings = func() reflect.Value { - return fm + + for i := 0; i < value.Len(); i++ { + elem := valueAt(i) + success, err := elemMatcher.Match(elem) + if err != nil { + lastError = err + continue } - foundAt = func(i int) { - fm.SetMapIndex(keys[i], value.MapIndex(keys[i])) + if success { + if result.Kind() == reflect.Invalid { + return true, nil + } + foundAt(i) } } } else { - valueAt = func(i int) interface{} { - return value.Index(i).Interface() - } + // We're dealing with an iterator as a first-class construct, so things + // are slightly different: there is no index defined as in case of + // arrays/slices/maps, just "ooooorder" + var found func(k, v reflect.Value) if result.Kind() != reflect.Invalid { - var f reflect.Value - if result.Kind() == reflect.Slice { - f = reflect.MakeSlice(result.Type(), 0, 0) + if result.Kind() == reflect.Map { + fm := reflect.MakeMap(result.Type()) + getFindings = func() reflect.Value { return fm } + found = func(k, v reflect.Value) { fm.SetMapIndex(k, v) } } else { - f = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0) - } - getFindings = func() reflect.Value { - return f - } - foundAt = func(i int) { - f = reflect.Append(f, value.Index(i)) + var fsl reflect.Value + if result.Kind() == reflect.Slice { + fsl = reflect.MakeSlice(result.Type(), 0, 0) + } else { + fsl = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0) + } + getFindings = func() reflect.Value { return fsl } + found = func(_, v reflect.Value) { fsl = reflect.Append(fsl, v) } } } - } - var 
lastError error - for i := 0; i < value.Len(); i++ { - elem := valueAt(i) - success, err := elemMatcher.Match(elem) - if err != nil { - lastError = err - continue + success := false + actualkT, _ := miter.IterKVTypes(actual) + if actualkT == nil { + miter.IterateV(actual, func(v reflect.Value) bool { + var err error + success, err = elemMatcher.Match(v.Interface()) + if err != nil { + lastError = err + return true // iterate on... + } + if success { + if result.Kind() == reflect.Invalid { + return false // a match and no result needed, so we're done + } + found(reflect.Value{}, v) + } + return true // iterate on... + }) + } else { + miter.IterateKV(actual, func(k, v reflect.Value) bool { + var err error + success, err = elemMatcher.Match(v.Interface()) + if err != nil { + lastError = err + return true // iterate on... + } + if success { + if result.Kind() == reflect.Invalid { + return false // a match and no result needed, so we're done + } + found(k, v) + } + return true // iterate on... + }) } - if success { - if result.Kind() == reflect.Invalid { - return true, nil - } - foundAt(i) + if success && result.Kind() == reflect.Invalid { + return true, nil } } diff --git a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go index 946cd8bea5..d9fcb8b804 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" ) @@ -13,8 +14,8 @@ type ContainElementsMatcher struct { } func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("ContainElements matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { + return false, fmt.Errorf("ContainElements matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1)) } matchers := matchers(matcher.Elements) diff --git a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go index 025b6e1ac2..4111f2b867 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go @@ -5,6 +5,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type HaveEachMatcher struct { @@ -12,8 +13,8 @@ type HaveEachMatcher struct { } func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("HaveEach matcher expects an array/slice/map. Got:\n%s", + if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { + return false, fmt.Errorf("HaveEach matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1)) } @@ -22,6 +23,38 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err elemMatcher = &EqualMatcher{Expected: matcher.Element} } + if miter.IsIter(actual) { + // rejecting the non-elements case works different for iterators as we + // don't want to fetch all elements into a slice first. 
+ count := 0 + var success bool + var err error + if miter.IsSeq2(actual) { + miter.IterateKV(actual, func(k, v reflect.Value) bool { + count++ + success, err = elemMatcher.Match(v.Interface()) + if err != nil { + return false + } + return success + }) + } else { + miter.IterateV(actual, func(v reflect.Value) bool { + count++ + success, err = elemMatcher.Match(v.Interface()) + if err != nil { + return false + } + return success + }) + } + if count == 0 { + return false, fmt.Errorf("HaveEach matcher expects a non-empty iter.Seq/iter.Seq2. Got:\n%s", + format.Object(actual, 1)) + } + return success, err + } + value := reflect.ValueOf(actual) if value.Len() == 0 { return false, fmt.Errorf("HaveEach matcher expects a non-empty array/slice/map. Got:\n%s", @@ -40,7 +73,8 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err } } - // if there are no elements, then HaveEach will match. + // if we never failed then we succeed; the empty/nil cases have already been + // rejected above. for i := 0; i < value.Len(); i++ { success, err := elemMatcher.Match(valueAt(i)) if err != nil { diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go index 5a236d7d69..23799f1c6f 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go +++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go @@ -2,8 +2,10 @@ package matchers import ( "fmt" + "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type mismatchFailure struct { @@ -21,17 +23,58 @@ type HaveExactElementsMatcher struct { func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) { matcher.resetState() - if isMap(actual) { - return false, fmt.Errorf("error") + if isMap(actual) || miter.IsSeq2(actual) { + return false, fmt.Errorf("HaveExactElements matcher doesn't work on map or iter.Seq2. Got:\n%s", format.Object(actual, 1)) } matchers := matchers(matcher.Elements) - values := valuesOf(actual) - lenMatchers := len(matchers) - lenValues := len(values) + success = true + if miter.IsIter(actual) { + // In the worst case, we need to see everything before we can give our + // verdict. The only exception is fast fail. + i := 0 + miter.IterateV(actual, func(v reflect.Value) bool { + if i >= lenMatchers { + // the iterator produces more values than we got matchers: this + // is not good. + matcher.extraIndex = i + success = false + return false + } + + elemMatcher := matchers[i].(omegaMatcher) + match, err := elemMatcher.Match(v.Interface()) + if err != nil { + matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ + index: i, + failure: err.Error(), + }) + success = false + } else if !match { + matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ + index: i, + failure: elemMatcher.FailureMessage(v.Interface()), + }) + success = false + } + i++ + return true + }) + if i < len(matchers) { + // the iterator produced less values than we got matchers: this is + // no good, no no no. 
+ matcher.missingIndex = i + success = false + } + return success, nil + } + + values := valuesOf(actual) + lenValues := len(values) + for i := 0; i < lenMatchers || i < lenValues; i++ { if i >= lenMatchers { matcher.extraIndex = i diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go index 8dd3f871a8..293457e85e 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_field.go +++ b/vendor/github.com/onsi/gomega/matchers/have_field.go @@ -40,7 +40,12 @@ func extractField(actual interface{}, field string, matchername string) (any, er extractedValue = actualValue.Addr().MethodByName(strings.TrimSuffix(fields[0], "()")) } if extractedValue == (reflect.Value{}) { - return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual)) + ptr := reflect.New(actualValue.Type()) + ptr.Elem().Set(actualValue) + extractedValue = ptr.MethodByName(strings.TrimSuffix(fields[0], "()")) + if extractedValue == (reflect.Value{}) { + return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual)) + } } t := extractedValue.Type() if t.NumIn() != 0 || t.NumOut() != 1 { diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go index 00cffec70e..b62ee93cb0 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -7,6 +7,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type HaveKeyMatcher struct { @@ -14,8 +15,8 @@ type HaveKeyMatcher struct { } func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) { - if !isMap(actual) { - return false, fmt.Errorf("HaveKey matcher expects a map. Got:%s", format.Object(actual, 1)) + if !isMap(actual) && !miter.IsSeq2(actual) { + return false, fmt.Errorf("HaveKey matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1)) } keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) @@ -23,6 +24,20 @@ func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err erro keyMatcher = &EqualMatcher{Expected: matcher.Key} } + if miter.IsSeq2(actual) { + var success bool + var err error + miter.IterateKV(actual, func(k, v reflect.Value) bool { + success, err = keyMatcher.Match(k.Interface()) + if err != nil { + err = fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) + return false + } + return !success + }) + return success, err + } + keys := reflect.ValueOf(actual).MapKeys() for i := 0; i < len(keys); i++ { success, err := keyMatcher.Match(keys[i].Interface()) diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go index 4c59168047..3d608f63eb 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -7,6 +7,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type HaveKeyWithValueMatcher struct { @@ -15,8 +16,8 @@ type HaveKeyWithValueMatcher struct { } func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) { - if !isMap(actual) { - return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. 
Got:%s", format.Object(actual, 1)) + if !isMap(actual) && !miter.IsSeq2(actual) { + return false, fmt.Errorf("HaveKeyWithValue matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1)) } keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) @@ -29,6 +30,27 @@ func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, valueMatcher = &EqualMatcher{Expected: matcher.Value} } + if miter.IsSeq2(actual) { + var success bool + var err error + miter.IterateKV(actual, func(k, v reflect.Value) bool { + success, err = keyMatcher.Match(k.Interface()) + if err != nil { + err = fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) + return false + } + if success { + success, err = valueMatcher.Match(v.Interface()) + if err != nil { + err = fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error()) + return false + } + } + return !success + }) + return success, err + } + keys := reflect.ValueOf(actual).MapKeys() for i := 0; i < len(keys); i++ { success, err := keyMatcher.Match(keys[i].Interface()) diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go index ee4276189d..ca25713fe2 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go @@ -13,7 +13,7 @@ type HaveLenMatcher struct { func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) { length, ok := lengthOf(actual) if !ok { - return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice/iterator. Got:\n%s", format.Object(actual, 1)) } return length == matcher.Count, nil diff --git a/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go b/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go new file mode 100644 index 0000000000..d8837a4d09 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go @@ -0,0 +1,128 @@ +//go:build go1.23 + +package miter + +import ( + "reflect" +) + +// HasIterators always returns false for Go versions before 1.23. +func HasIterators() bool { return true } + +// IsIter returns true if the specified value is a function type that can be +// range-d over, otherwise false. +// +// We don't use reflect's CanSeq and CanSeq2 directly, as these would return +// true also for other value types that are range-able, such as integers, +// slices, et cetera. Here, we aim only at range-able (iterator) functions. +func IsIter(it any) bool { + if it == nil { // on purpose we only test for untyped nil. + return false + } + // reject all non-iterator-func values, even if they're range-able. + t := reflect.TypeOf(it) + if t.Kind() != reflect.Func { + return false + } + return t.CanSeq() || t.CanSeq2() +} + +// IterKVTypes returns the reflection types of an iterator's yield function's K +// and optional V arguments, otherwise nil K and V reflection types. +func IterKVTypes(it any) (k, v reflect.Type) { + if it == nil { + return + } + // reject all non-iterator-func values, even if they're range-able. + t := reflect.TypeOf(it) + if t.Kind() != reflect.Func { + return + } + // get the reflection types for V, and where applicable, K. + switch { + case t.CanSeq(): + v = t. /*iterator fn*/ In(0). /*yield fn*/ In(0) + case t.CanSeq2(): + yieldfn := t. 
/*iterator fn*/ In(0) + k = yieldfn.In(0) + v = yieldfn.In(1) + } + return +} + +// IsSeq2 returns true if the passed iterator function is compatible with +// iter.Seq2, otherwise false. +// +// IsSeq2 hides the Go 1.23+ specific reflect.Type.CanSeq2 behind a facade which +// is empty for Go versions before 1.23. +func IsSeq2(it any) bool { + if it == nil { + return false + } + t := reflect.TypeOf(it) + return t.Kind() == reflect.Func && t.CanSeq2() +} + +// isNilly returns true if v is either an untyped nil, or is a nil function (not +// necessarily an iterator function). +func isNilly(v any) bool { + if v == nil { + return true + } + rv := reflect.ValueOf(v) + return rv.Kind() == reflect.Func && rv.IsNil() +} + +// IterateV loops over the elements produced by an iterator function, passing +// the elements to the specified yield function individually and stopping only +// when either the iterator function runs out of elements or the yield function +// tell us to stop it. +// +// IterateV works very much like reflect.Value.Seq but hides the Go 1.23+ +// specific parts behind a facade which is empty for Go versions before 1.23, in +// order to simplify code maintenance for matchers when using older Go versions. +func IterateV(it any, yield func(v reflect.Value) bool) { + if isNilly(it) { + return + } + // reject all non-iterator-func values, even if they're range-able. + t := reflect.TypeOf(it) + if t.Kind() != reflect.Func || !t.CanSeq() { + return + } + // Call the specified iterator function, handing it our adaptor to call the + // specified generic reflection yield function. + reflectedYield := reflect.MakeFunc( + t. /*iterator fn*/ In(0), + func(args []reflect.Value) []reflect.Value { + return []reflect.Value{reflect.ValueOf(yield(args[0]))} + }) + reflect.ValueOf(it).Call([]reflect.Value{reflectedYield}) +} + +// IterateKV loops over the key-value elements produced by an iterator function, +// passing the elements to the specified yield function individually and +// stopping only when either the iterator function runs out of elements or the +// yield function tell us to stop it. +// +// IterateKV works very much like reflect.Value.Seq2 but hides the Go 1.23+ +// specific parts behind a facade which is empty for Go versions before 1.23, in +// order to simplify code maintenance for matchers when using older Go versions. +func IterateKV(it any, yield func(k, v reflect.Value) bool) { + if isNilly(it) { + return + } + // reject all non-iterator-func values, even if they're range-able. + t := reflect.TypeOf(it) + if t.Kind() != reflect.Func || !t.CanSeq2() { + return + } + // Call the specified iterator function, handing it our adaptor to call the + // specified generic reflection yield function. + reflectedYield := reflect.MakeFunc( + t. /*iterator fn*/ In(0), + func(args []reflect.Value) []reflect.Value { + return []reflect.Value{reflect.ValueOf(yield(args[0], args[1]))} + }) + reflect.ValueOf(it).Call([]reflect.Value{reflectedYield}) +} diff --git a/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go b/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go new file mode 100644 index 0000000000..4b8fcc55bd --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go @@ -0,0 +1,44 @@ +//go:build !go1.23 + +/* +Gomega matchers + +This package implements the Gomega matchers and does not typically need to be imported. 
+See the docs for Gomega for documentation on the matchers + +http://onsi.github.io/gomega/ +*/ + +package miter + +import "reflect" + +// HasIterators always returns false for Go versions before 1.23. +func HasIterators() bool { return false } + +// IsIter always returns false for Go versions before 1.23 as there is no +// iterator (function) pattern defined yet; see also: +// https://tip.golang.org/blog/range-functions. +func IsIter(i any) bool { return false } + +// IsSeq2 always returns false for Go versions before 1.23 as there is no +// iterator (function) pattern defined yet; see also: +// https://tip.golang.org/blog/range-functions. +func IsSeq2(it any) bool { return false } + +// IterKVTypes always returns nil reflection types for Go versions before 1.23 +// as there is no iterator (function) pattern defined yet; see also: +// https://tip.golang.org/blog/range-functions. +func IterKVTypes(i any) (k, v reflect.Type) { + return +} + +// IterateV never loops over what has been passed to it as an iterator for Go +// versions before 1.23 as there is no iterator (function) pattern defined yet; +// see also: https://tip.golang.org/blog/range-functions. +func IterateV(it any, yield func(v reflect.Value) bool) {} + +// IterateKV never loops over what has been passed to it as an iterator for Go +// versions before 1.23 as there is no iterator (function) pattern defined yet; +// see also: https://tip.golang.org/blog/range-functions. +func IterateKV(it any, yield func(k, v reflect.Value) bool) {} diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go index dced2419ea..b9440ac7ad 100644 --- a/vendor/github.com/onsi/gomega/matchers/type_support.go +++ b/vendor/github.com/onsi/gomega/matchers/type_support.go @@ -15,6 +15,8 @@ import ( "encoding/json" "fmt" "reflect" + + "github.com/onsi/gomega/matchers/internal/miter" ) type omegaMatcher interface { @@ -152,6 +154,17 @@ func lengthOf(a interface{}) (int, bool) { switch reflect.TypeOf(a).Kind() { case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice: return reflect.ValueOf(a).Len(), true + case reflect.Func: + if !miter.IsIter(a) { + return 0, false + } + var l int + if miter.IsSeq2(a) { + miter.IterateKV(a, func(k, v reflect.Value) bool { l++; return true }) + } else { + miter.IterateV(a, func(v reflect.Value) bool { l++; return true }) + } + return l, true default: return 0, false } diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE new file mode 100644 index 0000000000..74e6ec6963 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go new file mode 100644 index 0000000000..6f42984834 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go @@ -0,0 +1,25 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monitoring + +// GroupName is set to var instead of const, since this provides the ability for clients importing the module - +// github.com/prometheus-operator/prometheus-operator/pkg/apis to manage the operator's objects in a different +// API group +// +// Use `ldflags` in the client side, e.g.: +// go run -ldflags="-s -X github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring.GroupName=monitoring.example.com" ./example/client/. +var ( + GroupName = "monitoring.coreos.com" +) diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/resource.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/resource.go new file mode 100644 index 0000000000..e467c2bfa8 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/resource.go @@ -0,0 +1,95 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package monitoring + +import ( + "fmt" +) + +const ( + PrometheusesKind = "Prometheus" + PrometheusName = "prometheuses" + + PrometheusAgentsKind = "PrometheusAgent" + PrometheusAgentName = "prometheusagents" + + AlertmanagersKind = "Alertmanager" + AlertmanagerName = "alertmanagers" + + AlertmanagerConfigsKind = "AlertmanagerConfig" + AlertmanagerConfigName = "alertmanagerconfigs" + + ServiceMonitorsKind = "ServiceMonitor" + ServiceMonitorName = "servicemonitors" + + PodMonitorsKind = "PodMonitor" + PodMonitorName = "podmonitors" + + PrometheusRuleKind = "PrometheusRule" + PrometheusRuleName = "prometheusrules" + + ProbesKind = "Probe" + ProbeName = "probes" + + ScrapeConfigsKind = "ScrapeConfig" + ScrapeConfigName = "scrapeconfigs" + + ThanosRulersKind = "ThanosRuler" + ThanosRulerName = "thanosrulers" +) + +var resourceToKindMap = map[string]string{ + PrometheusName: PrometheusesKind, + PrometheusAgentName: PrometheusAgentsKind, + AlertmanagerName: AlertmanagersKind, + AlertmanagerConfigName: AlertmanagerConfigsKind, + ServiceMonitorName: ServiceMonitorsKind, + PodMonitorName: PodMonitorsKind, + PrometheusRuleName: PrometheusRuleKind, + ProbeName: ProbesKind, + ScrapeConfigName: ScrapeConfigsKind, + ThanosRulerName: ThanosRulersKind, +} + +var kindToResource = map[string]string{ + PrometheusesKind: PrometheusName, + PrometheusAgentsKind: PrometheusAgentName, + AlertmanagersKind: AlertmanagerName, + AlertmanagerConfigsKind: AlertmanagerConfigName, + ServiceMonitorsKind: ServiceMonitorName, + PodMonitorsKind: PodMonitorName, + PrometheusRuleKind: PrometheusRuleName, + ProbesKind: ProbeName, + ScrapeConfigsKind: ScrapeConfigName, + ThanosRulersKind: ThanosRulerName, +} + +// KindToResource returns the resource name corresponding to the given kind. +func KindToResource(k string) string { + kind, found := kindToResource[k] + if !found { + panic(fmt.Sprintf("failed to map kind %q to a resource name", k)) + } + return kind +} + +// ResourceToKind returns the kind corresponding to the given resource name. +func ResourceToKind(r string) string { + kind, found := resourceToKindMap[r] + if !found { + panic(fmt.Sprintf("failed to map resource %q to a kind", r)) + } + return kind +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go new file mode 100644 index 0000000000..9254d1a4b0 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go @@ -0,0 +1,773 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + AlertmanagersKind = "Alertmanager" + AlertmanagerName = "alertmanagers" + AlertManagerKindKey = "alertmanager" +) + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="am" +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of Alertmanager" +// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas",description="The number of desired replicas" +// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.availableReplicas",description="The number of ready replicas" +// +kubebuilder:printcolumn:name="Reconciled",type="string",JSONPath=".status.conditions[?(@.type == 'Reconciled')].status" +// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.conditions[?(@.type == 'Available')].status" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".status.paused",description="Whether the resource reconciliation is paused or not",priority=1 +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale + +// The `Alertmanager` custom resource definition (CRD) defines a desired [Alertmanager](https://prometheus.io/docs/alerting) setup to run in a Kubernetes cluster. It allows to specify many options such as the number of replicas, persistent storage and many more. +// +// For each `Alertmanager` resource, the Operator deploys a `StatefulSet` in the same namespace. When there are two or more configured replicas, the Operator runs the Alertmanager instances in high-availability mode. +// +// The resource defines via label and namespace selectors which `AlertmanagerConfig` objects should be associated to the deployed Alertmanager instances. +type Alertmanager struct { + // TypeMeta defines the versioned schema of this representation of an object. + // +optional + metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of the desired behavior of the Alertmanager cluster. More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +required + Spec AlertmanagerSpec `json:"spec"` + // status defines the most recent observed status of the Alertmanager cluster. Read-only. + // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status AlertmanagerStatus `json:"status,omitempty"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *Alertmanager) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// AlertmanagerSpec is a specification of the desired behavior of the Alertmanager cluster. 
More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type AlertmanagerSpec struct { + // podMetadata defines labels and annotations which are propagated to the Alertmanager pods. + // + // The following items are reserved and cannot be overridden: + // * "alertmanager" label, set to the name of the Alertmanager instance. + // * "app.kubernetes.io/instance" label, set to the name of the Alertmanager instance. + // * "app.kubernetes.io/managed-by" label, set to "prometheus-operator". + // * "app.kubernetes.io/name" label, set to "alertmanager". + // * "app.kubernetes.io/version" label, set to the Alertmanager version. + // * "kubectl.kubernetes.io/default-container" annotation, set to "alertmanager". + // +optional + PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` + // image if specified has precedence over baseImage, tag and sha + // combinations. Specifying the version is still necessary to ensure the + // Prometheus Operator knows what version of Alertmanager is being + // configured. + // +optional + Image *string `json:"image,omitempty"` + // imagePullPolicy for the 'alertmanager', 'init-config-reloader' and 'config-reloader' containers. + // See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy for more details. + // +kubebuilder:validation:Enum="";Always;Never;IfNotPresent + // +optional + ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"` + // version the cluster should be on. + // +optional + Version string `json:"version,omitempty"` + // tag of Alertmanager container image to be deployed. Defaults to the value of `version`. + // Version is ignored if Tag is set. + // Deprecated: use 'image' instead. The image tag can be specified as part of the image URL. + // +optional + Tag string `json:"tag,omitempty"` + // sha of Alertmanager container image to be deployed. Defaults to the value of `version`. + // Similar to a tag, but the SHA explicitly deploys an immutable container image. + // Version and Tag are ignored if SHA is set. + // Deprecated: use 'image' instead. The image digest can be specified as part of the image URL. + // +optional + SHA string `json:"sha,omitempty"` + // baseImage that is used to deploy pods, without tag. + // Deprecated: use 'image' instead. + // +optional + BaseImage string `json:"baseImage,omitempty"` + // imagePullSecrets An optional list of references to secrets in the same namespace + // to use for pulling prometheus and alertmanager images from registries + // see https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + // +optional + ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + // secrets is a list of Secrets in the same namespace as the Alertmanager + // object, which shall be mounted into the Alertmanager Pods. + // Each Secret is added to the StatefulSet definition as a volume named `secret-`. + // The Secrets are mounted into `/etc/alertmanager/secrets/` in the 'alertmanager' container. + // +optional + Secrets []string `json:"secrets,omitempty"` + // configMaps defines a list of ConfigMaps in the same namespace as the Alertmanager + // object, which shall be mounted into the Alertmanager Pods. + // Each ConfigMap is added to the StatefulSet definition as a volume named `configmap-`. + // The ConfigMaps are mounted into `/etc/alertmanager/configmaps/` in the 'alertmanager' container. 
+ // +optional + ConfigMaps []string `json:"configMaps,omitempty"` + // configSecret defines the name of a Kubernetes Secret in the same namespace as the + // Alertmanager object, which contains the configuration for this Alertmanager + // instance. If empty, it defaults to `alertmanager-`. + // + // The Alertmanager configuration should be available under the + // `alertmanager.yaml` key. Additional keys from the original secret are + // copied to the generated secret and mounted into the + // `/etc/alertmanager/config` directory in the `alertmanager` container. + // + // If either the secret or the `alertmanager.yaml` key is missing, the + // operator provisions a minimal Alertmanager configuration with one empty + // receiver (effectively dropping alert notifications). + // +optional + ConfigSecret string `json:"configSecret,omitempty"` + // logLevel for Alertmanager to be configured with. + // +kubebuilder:validation:Enum="";debug;info;warn;error + // +optional + LogLevel string `json:"logLevel,omitempty"` + // logFormat for Alertmanager to be configured with. + // +kubebuilder:validation:Enum="";logfmt;json + // +optional + LogFormat string `json:"logFormat,omitempty"` + // replicas defines the expected size of the alertmanager cluster. The controller will + // eventually make the size of the running cluster equal to the expected + // size. + // +optional + Replicas *int32 `json:"replicas,omitempty"` + // retention defines the time duration Alertmanager shall retain data for. Default is '120h', + // and must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds seconds minutes hours). + // +kubebuilder:default:="120h" + // +optional + Retention GoDuration `json:"retention,omitempty"` + // storage defines the definition of how storage will be used by the Alertmanager + // instances. + // +optional + Storage *StorageSpec `json:"storage,omitempty"` + // volumes allows configuration of additional volumes on the output StatefulSet definition. + // Volumes specified will be appended to other volumes that are generated as a result of + // StorageSpec objects. + // +optional + Volumes []v1.Volume `json:"volumes,omitempty"` + // volumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition. + // VolumeMounts specified will be appended to other VolumeMounts in the alertmanager container, + // that are generated as a result of StorageSpec objects. + // +optional + VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` + // persistentVolumeClaimRetentionPolicy controls if and how PVCs are deleted during the lifecycle of a StatefulSet. + // The default behavior is all PVCs are retained. + // This is an alpha field from kubernetes 1.23 until 1.26 and a beta field from 1.26. + // It requires enabling the StatefulSetAutoDeletePVC feature gate. + // + // +optional + PersistentVolumeClaimRetentionPolicy *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + // externalUrl defines the URL used to access the Alertmanager web service. This is + // necessary to generate correct URLs. This is necessary if Alertmanager is not + // served from root of a DNS name. + // +optional + ExternalURL string `json:"externalUrl,omitempty"` + // routePrefix Alertmanager registers HTTP handlers for. This is useful, + // if using ExternalURL and a proxy is rewriting HTTP routes of a request, + // and the actual ExternalURL is still true, but the server serves requests + // under a different route prefix. 
For example for use with `kubectl proxy`. + // +optional + RoutePrefix string `json:"routePrefix,omitempty"` + // paused if set to true all actions on the underlying managed objects are not + // going to be performed, except for delete actions. + // +optional + Paused bool `json:"paused,omitempty"` // nolint:kubeapilinter + // nodeSelector defines which Nodes the Pods are scheduled on. + // +optional + //nolint:kubeapilinter // standard Kubernetes node selector format + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // resources defines the resource requests and limits of the Pods. + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + // affinity defines the pod's scheduling constraints. + // +optional + Affinity *v1.Affinity `json:"affinity,omitempty"` + // tolerations defines the pod's tolerations. + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // topologySpreadConstraints defines the Pod's topology spread constraints. + // +optional + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + // securityContext holds pod-level security attributes and common container settings. + // This defaults to the default PodSecurityContext. + // +optional + SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` + // dnsPolicy defines the DNS policy for the pods. + // + // +optional + DNSPolicy *DNSPolicy `json:"dnsPolicy,omitempty"` + // dnsConfig defines the DNS configuration for the pods. + // + // +optional + DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty"` + // enableServiceLinks defines whether information about services should be injected into pod's environment variables + // +optional + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` // nolint:kubeapilinter + // serviceName defines the service name used by the underlying StatefulSet(s) as the governing service. + // If defined, the Service must be created before the Alertmanager resource in the same namespace and it must define a selector that matches the pod labels. + // If empty, the operator will create and manage a headless service named `alertmanager-operated` for Alertmanager resources. + // When deploying multiple Alertmanager resources in the same namespace, it is recommended to specify a different value for each. + // See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id for more details. + // +optional + // +kubebuilder:validation:MinLength=1 + ServiceName *string `json:"serviceName,omitempty"` + // serviceAccountName is the name of the ServiceAccount to use to run the + // Prometheus Pods. + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + // listenLocal defines the Alertmanager server listen on loopback, so that it + // does not bind against the Pod IP. Note this is only for the Alertmanager + // UI, not the gossip communication. + // +optional + ListenLocal bool `json:"listenLocal,omitempty"` // nolint:kubeapilinter + + // podManagementPolicy defines the policy for creating/deleting pods when + // scaling up and down. + // + // Unlike the default StatefulSet behavior, the default policy is + // `Parallel` to avoid manual intervention in case a pod gets stuck during + // a rollout. + // + // Note that updating this value implies the recreation of the StatefulSet + // which incurs a service outage. 
+ // + // +optional + PodManagementPolicy *PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + + // updateStrategy indicates the strategy that will be employed to update + // Pods in the StatefulSet when a revision is made to statefulset's Pod + // Template. + // + // The default strategy is RollingUpdate. + // + // +optional + UpdateStrategy *StatefulSetUpdateStrategy `json:"updateStrategy,omitempty"` + + // containers allows injecting additional containers. This is meant to + // allow adding an authentication proxy to an Alertmanager pod. + // Containers described here modify an operator generated container if they + // share the same name and modifications are done via a strategic merge + // patch. The current container names are: `alertmanager` and + // `config-reloader`. Overriding containers is entirely outside the scope + // of what the maintainers will support and by doing so, you accept that + // this behaviour may break at any time without notice. + // +optional + Containers []v1.Container `json:"containers,omitempty"` + // initContainers allows adding initContainers to the pod definition. Those can be used to e.g. + // fetch secrets for injection into the Alertmanager configuration from external sources. Any + // errors during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // InitContainers described here modify an operator + // generated init containers if they share the same name and modifications are + // done via a strategic merge patch. The current init container name is: + // `init-config-reloader`. Overriding init containers is entirely outside the + // scope of what the maintainers will support and by doing so, you accept that + // this behaviour may break at any time without notice. + // +optional + InitContainers []v1.Container `json:"initContainers,omitempty"` + // priorityClassName assigned to the Pods + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` + // additionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. + // +optional + AdditionalPeers []string `json:"additionalPeers,omitempty"` + // clusterAdvertiseAddress defines the explicit address to advertise in cluster. + // Needs to be provided for non RFC1918 [1] (public) addresses. + // [1] RFC1918: https://tools.ietf.org/html/rfc1918 + // +optional + ClusterAdvertiseAddress string `json:"clusterAdvertiseAddress,omitempty"` + // clusterGossipInterval defines the interval between gossip attempts. + // +optional + ClusterGossipInterval GoDuration `json:"clusterGossipInterval,omitempty"` + // clusterLabel defines the identifier that uniquely identifies the Alertmanager cluster. + // You should only set it when the Alertmanager cluster includes Alertmanager instances which are external to this Alertmanager resource. In practice, the addresses of the external instances are provided via the `.spec.additionalPeers` field. + // +optional + ClusterLabel *string `json:"clusterLabel,omitempty"` + // clusterPushpullInterval defines the interval between pushpull attempts. + // +optional + ClusterPushpullInterval GoDuration `json:"clusterPushpullInterval,omitempty"` + // clusterPeerTimeout defines the timeout for cluster peering. + // +optional + ClusterPeerTimeout GoDuration `json:"clusterPeerTimeout,omitempty"` + // portName defines the port's name for the pods and governing service. + // Defaults to `web`. 
+ // +kubebuilder:default:="web" + // +optional + PortName string `json:"portName,omitempty"` + // forceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica. + // Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each. + // +optional + ForceEnableClusterMode bool `json:"forceEnableClusterMode,omitempty"` // nolint:kubeapilinter + // alertmanagerConfigSelector defines the selector to be used for to merge and configure Alertmanager with. + // +optional + AlertmanagerConfigSelector *metav1.LabelSelector `json:"alertmanagerConfigSelector,omitempty"` + // alertmanagerConfigNamespaceSelector defines the namespaces to be selected for AlertmanagerConfig discovery. If nil, only + // check own namespace. + // +optional + AlertmanagerConfigNamespaceSelector *metav1.LabelSelector `json:"alertmanagerConfigNamespaceSelector,omitempty"` + + // alertmanagerConfigMatcherStrategy defines how AlertmanagerConfig objects + // process incoming alerts. + // +optional + AlertmanagerConfigMatcherStrategy AlertmanagerConfigMatcherStrategy `json:"alertmanagerConfigMatcherStrategy,omitempty"` + + // minReadySeconds defines the minimum number of seconds for which a newly + // created pod should be ready without any of its container crashing for it + // to be considered available. + // + // If unset, pods will be considered available as soon as they are ready. + // + // When the Alertmanager version is greater than or equal to v0.30.0, the + // duration is also used to delay the first flush of the aggregation + // groups. This delay helps ensuring that all alerts have been resent by + // the Prometheus instances to Alertmanager after a roll-out. It is + // possible to override this behavior passing a custom value via + // `.spec.additionalArgs`. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + // hostAliases Pods configuration + // +listType=map + // +listMapKey=ip + // +optional + HostAliases []HostAlias `json:"hostAliases,omitempty"` + // web defines the web command line flags when starting Alertmanager. + // +optional + Web *AlertmanagerWebSpec `json:"web,omitempty"` + // limits defines the limits command line flags when starting Alertmanager. + // +optional + Limits *AlertmanagerLimitsSpec `json:"limits,omitempty"` + + // clusterTLS defines the mutual TLS configuration for the Alertmanager cluster's gossip protocol. + // + // It requires Alertmanager >= 0.24.0. + // +optional + ClusterTLS *ClusterTLSConfig `json:"clusterTLS,omitempty"` + // alertmanagerConfiguration defines the configuration of Alertmanager. + // + // If defined, it takes precedence over the `configSecret` field. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // +optional + AlertmanagerConfiguration *AlertmanagerConfiguration `json:"alertmanagerConfiguration,omitempty"` + // automountServiceAccountToken defines whether a service account token should be automatically mounted in the pod. + // If the service account has `automountServiceAccountToken: true`, set the field to `false` to opt out of automounting API credentials. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` // nolint:kubeapilinter + // enableFeatures defines the Alertmanager's feature flags. By default, no features are enabled. 
+ // Enabling features which are disabled by default is entirely outside the + // scope of what the maintainers will support and by doing so, you accept + // that this behaviour may break at any time without notice. + // + // It requires Alertmanager >= 0.27.0. + // +optional + EnableFeatures []string `json:"enableFeatures,omitempty"` + // additionalArgs allows setting additional arguments for the 'Alertmanager' container. + // It is intended for e.g. activating hidden flags which are not supported by + // the dedicated configuration options yet. The arguments are passed as-is to the + // Alertmanager container which may cause issues if they are invalid or not supported + // by the given Alertmanager version. + // +optional + AdditionalArgs []Argument `json:"additionalArgs,omitempty"` + + // terminationGracePeriodSeconds defines the Optional duration in seconds the pod needs to terminate gracefully. + // Value must be non-negative integer. The value zero indicates stop immediately via + // the kill signal (no opportunity to shut down) which may lead to data corruption. + // + // Defaults to 120 seconds. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + + // hostUsers supports the user space in Kubernetes. + // + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/user-namespaces/ + // + // + // The feature requires at least Kubernetes 1.28 with the `UserNamespacesSupport` feature gate enabled. + // Starting Kubernetes 1.33, the feature is enabled by default. + // + // +optional + HostUsers *bool `json:"hostUsers,omitempty"` // nolint:kubeapilinter +} + +type AlertmanagerConfigMatcherStrategy struct { + // type defines the strategy used by + // AlertmanagerConfig objects to match alerts in the routes and inhibition + // rules. + // + // The default value is `OnNamespace`. + // + // +kubebuilder:validation:Enum="OnNamespace";"OnNamespaceExceptForAlertmanagerNamespace";"None" + // +kubebuilder:default:="OnNamespace" + // +optional + Type AlertmanagerConfigMatcherStrategyType `json:"type,omitempty"` +} + +type AlertmanagerConfigMatcherStrategyType string + +const ( + // With `OnNamespace`, the route and inhibition rules of an + // AlertmanagerConfig object only process alerts that have a `namespace` + // label equal to the namespace of the object. + OnNamespaceConfigMatcherStrategyType AlertmanagerConfigMatcherStrategyType = "OnNamespace" + + // With `OnNamespaceExceptForAlertmanagerNamespace`, the route and inhibition rules of an + // AlertmanagerConfig object only process alerts that have a `namespace` + // label equal to the namespace of the object, unless the AlertmanagerConfig object + // is in the same namespace as the Alertmanager object, where it will process all alerts. + OnNamespaceExceptForAlertmanagerNamespaceConfigMatcherStrategyType AlertmanagerConfigMatcherStrategyType = "OnNamespaceExceptForAlertmanagerNamespace" + + // With `None`, the route and inhibition rules of an AlertmanagerConfig + // object process all incoming alerts. + NoneConfigMatcherStrategyType AlertmanagerConfigMatcherStrategyType = "None" +) + +// AlertmanagerConfiguration defines the Alertmanager configuration. +// +k8s:openapi-gen=true +type AlertmanagerConfiguration struct { + // name defines the name of the AlertmanagerConfig custom resource which is used to generate the Alertmanager configuration. + // It must be defined in the same namespace as the Alertmanager object. 
+ // The operator will not enforce a `namespace` label for routes and inhibition rules. + // +kubebuilder:validation:MinLength=1 + // +optional + Name string `json:"name,omitempty"` + // global defines the global parameters of the Alertmanager configuration. + // +optional + Global *AlertmanagerGlobalConfig `json:"global,omitempty"` + // templates defines the custom notification templates. + // +optional + Templates []SecretOrConfigMap `json:"templates,omitempty"` +} + +// AlertmanagerGlobalConfig configures parameters that are valid in all other configuration contexts. +// See https://prometheus.io/docs/alerting/latest/configuration/#configuration-file +type AlertmanagerGlobalConfig struct { + // smtp defines global SMTP parameters. + // +optional + SMTPConfig *GlobalSMTPConfig `json:"smtp,omitempty"` + + // resolveTimeout defines the default value used by alertmanager if the alert does + // not include EndsAt, after this time passes it can declare the alert as resolved if it has not been updated. + // This has no impact on alerts from Prometheus, as they always include EndsAt. + // +optional + ResolveTimeout Duration `json:"resolveTimeout,omitempty"` + + // httpConfig defines the default HTTP configuration. + // +optional + HTTPConfigWithProxy *HTTPConfigWithProxy `json:"httpConfig,omitempty"` + + // slackApiUrl defines the default Slack API URL. + // +optional + SlackAPIURL *v1.SecretKeySelector `json:"slackApiUrl,omitempty"` + + // opsGenieApiUrl defines the default OpsGenie API URL. + // +optional + OpsGenieAPIURL *v1.SecretKeySelector `json:"opsGenieApiUrl,omitempty"` + + // opsGenieApiKey defines the default OpsGenie API Key. + // +optional + OpsGenieAPIKey *v1.SecretKeySelector `json:"opsGenieApiKey,omitempty"` + + // pagerdutyUrl defines the default Pagerduty URL. + // +optional + PagerdutyURL *URL `json:"pagerdutyUrl,omitempty"` + + // telegram defines the default Telegram config + // +optional + TelegramConfig *GlobalTelegramConfig `json:"telegram,omitempty"` + + // jira defines the default configuration for Jira. + // +optional + JiraConfig *GlobalJiraConfig `json:"jira,omitempty"` + + // victorops defines the default configuration for VictorOps. + // +optional + VictorOpsConfig *GlobalVictorOpsConfig `json:"victorops,omitempty"` + + // rocketChat defines the default configuration for Rocket Chat. + // +optional + RocketChatConfig *GlobalRocketChatConfig `json:"rocketChat,omitempty"` + + // webex defines the default configuration for Webex. + // +optional + WebexConfig *GlobalWebexConfig `json:"webex,omitempty"` + + // wechat defines the default WeChat Config + // +optional + WeChatConfig *GlobalWeChatConfig `json:"wechat,omitempty"` +} + +// AlertmanagerStatus is the most recent observed status of the Alertmanager cluster. Read-only. +// More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type AlertmanagerStatus struct { + // paused defines whether any actions on the underlying managed objects are + // being performed. Only delete actions will be performed. + // +optional + Paused bool `json:"paused"` // nolint:kubeapilinter + // replicas defines the total number of non-terminated pods targeted by this Alertmanager + // object (their labels match the selector). + // +optional + Replicas int32 `json:"replicas"` + // updatedReplicas defines the total number of non-terminated pods targeted by this Alertmanager + // object that have the desired version spec. 
+ // +optional + UpdatedReplicas int32 `json:"updatedReplicas"` + // availableReplicas defines the total number of available pods (ready for at least minReadySeconds) + // targeted by this Alertmanager cluster. + // +optional + AvailableReplicas int32 `json:"availableReplicas"` + // unavailableReplicas defines the total number of unavailable pods targeted by this Alertmanager object. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas"` + // selector used to match the pods targeted by this Alertmanager object. + // +optional + Selector string `json:"selector,omitempty"` + // conditions defines the current state of the Alertmanager object. + // +listType=map + // +listMapKey=type + // +optional + Conditions []Condition `json:"conditions,omitempty"` +} + +func (a *Alertmanager) ExpectedReplicas() int { + if a.Spec.Replicas == nil { + return 1 + } + return int(*a.Spec.Replicas) +} + +func (a *Alertmanager) SetReplicas(i int) { a.Status.Replicas = int32(i) } +func (a *Alertmanager) SetUpdatedReplicas(i int) { a.Status.UpdatedReplicas = int32(i) } +func (a *Alertmanager) SetAvailableReplicas(i int) { a.Status.AvailableReplicas = int32(i) } +func (a *Alertmanager) SetUnavailableReplicas(i int) { a.Status.UnavailableReplicas = int32(i) } + +// AlertmanagerWebSpec defines the web command line flags when starting Alertmanager. +// +k8s:openapi-gen=true +type AlertmanagerWebSpec struct { + WebConfigFileFields `json:",inline"` + // getConcurrency defines the maximum number of GET requests processed concurrently. This corresponds to the + // Alertmanager's `--web.get-concurrency` flag. + // +optional + GetConcurrency *uint32 `json:"getConcurrency,omitempty"` + // timeout for HTTP requests. This corresponds to the Alertmanager's + // `--web.timeout` flag. + // +optional + Timeout *uint32 `json:"timeout,omitempty"` +} + +// AlertmanagerLimitsSpec defines the limits command line flags when starting Alertmanager. +// +k8s:openapi-gen=true +type AlertmanagerLimitsSpec struct { + // maxSilences defines the maximum number active and pending silences. This corresponds to the + // Alertmanager's `--silences.max-silences` flag. + // It requires Alertmanager >= v0.28.0. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + MaxSilences *int32 `json:"maxSilences,omitempty"` + // maxPerSilenceBytes defines the maximum size of an individual silence as stored on disk. This corresponds to the Alertmanager's + // `--silences.max-per-silence-bytes` flag. + // It requires Alertmanager >= v0.28.0. + // + // +optional + MaxPerSilenceBytes *ByteSize `json:"maxPerSilenceBytes,omitempty"` +} + +// GlobalSMTPConfig configures global SMTP parameters. +// See https://prometheus.io/docs/alerting/latest/configuration/#configuration-file +type GlobalSMTPConfig struct { + // from defines the default SMTP From header field. + // +optional + From *string `json:"from,omitempty"` + + // smartHost defines the default SMTP smarthost used for sending emails. + // +optional + SmartHost *HostPort `json:"smartHost,omitempty"` + + // hello defines the default hostname to identify to the SMTP server. + // +optional + Hello *string `json:"hello,omitempty"` + + // authUsername represents SMTP Auth using CRAM-MD5, LOGIN and PLAIN. If empty, Alertmanager doesn't authenticate to the SMTP server. + // +optional + AuthUsername *string `json:"authUsername,omitempty"` + + // authPassword represents SMTP Auth using LOGIN and PLAIN. 
+ // +optional + AuthPassword *v1.SecretKeySelector `json:"authPassword,omitempty"` + + // authIdentity represents SMTP Auth using PLAIN + // +optional + AuthIdentity *string `json:"authIdentity,omitempty"` + + // authSecret represents SMTP Auth using CRAM-MD5. + // +optional + AuthSecret *v1.SecretKeySelector `json:"authSecret,omitempty"` + + // requireTLS defines the default SMTP TLS requirement. + // Note that Go does not support unencrypted connections to remote SMTP endpoints. + // +optional + RequireTLS *bool `json:"requireTLS,omitempty"` // nolint:kubeapilinter + + // tlsConfig defines the default TLS configuration for SMTP receivers + // +optional + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` +} + +// GlobalTelegramConfig configures global Telegram parameters. +type GlobalTelegramConfig struct { + // apiURL defines he default Telegram API URL. + // + // It requires Alertmanager >= v0.24.0. + // +optional + APIURL *URL `json:"apiURL,omitempty"` +} + +// GlobalJiraConfig configures global Jira parameters. +type GlobalJiraConfig struct { + // apiURL defines the default Jira API URL. + // + // It requires Alertmanager >= v0.28.0. + // + // +optional + APIURL *URL `json:"apiURL,omitempty"` +} + +// GlobalRocketChatConfig configures global Rocket Chat parameters. +type GlobalRocketChatConfig struct { + // apiURL defines the default Rocket Chat API URL. + // + // It requires Alertmanager >= v0.28.0. + // + // +optional + APIURL *URL `json:"apiURL,omitempty"` + + // token defines the default Rocket Chat token. + // + // It requires Alertmanager >= v0.28.0. + // + // +optional + Token *v1.SecretKeySelector `json:"token,omitempty"` + + // tokenID defines the default Rocket Chat Token ID. + // + // It requires Alertmanager >= v0.28.0. + // + // +optional + TokenID *v1.SecretKeySelector `json:"tokenID,omitempty"` +} + +// GlobalWebexConfig configures global Webex parameters. +// See https://prometheus.io/docs/alerting/latest/configuration/#configuration-file +type GlobalWebexConfig struct { + // apiURL defines the is the default Webex API URL. + // + // It requires Alertmanager >= v0.25.0. + // + // +optional + APIURL *URL `json:"apiURL,omitempty"` +} + +type GlobalWeChatConfig struct { + // apiURL defines he default WeChat API URL. + // The default value is "https://qyapi.weixin.qq.com/cgi-bin/" + // +optional + APIURL *URL `json:"apiURL,omitempty"` + + // apiSecret defines the default WeChat API Secret. + // +optional + APISecret *v1.SecretKeySelector `json:"apiSecret,omitempty"` + + // apiCorpID defines the default WeChat API Corporate ID. + // +optional + // +kubebuilder:validation:MinLength=1 + APICorpID *string `json:"apiCorpID,omitempty"` +} + +// GlobalVictorOpsConfig configures global VictorOps parameters. +type GlobalVictorOpsConfig struct { + // apiURL defines the default VictorOps API URL. + // + // +optional + APIURL *URL `json:"apiURL,omitempty"` + // apiKey defines the default VictorOps API Key. + // + // +optional + APIKey *v1.SecretKeySelector `json:"apiKey,omitempty"` +} + +// HostPort represents a "host:port" network address. +type HostPort struct { + // host defines the host's address, it can be a DNS name or a literal IP address. + // +kubebuilder:validation:MinLength=1 + // +required + Host string `json:"host"` + // port defines the host's port, it can be a literal port number or a port name. + // +kubebuilder:validation:MinLength=1 + // +required + Port string `json:"port"` +} + +// AlertmanagerList is a list of Alertmanagers. 
+// +k8s:openapi-gen=true +type AlertmanagerList struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + metav1.ListMeta `json:"metadata,omitempty"` + // List of Alertmanagers + Items []Alertmanager `json:"items"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *AlertmanagerList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// ClusterTLSConfig defines the mutual TLS configuration for the Alertmanager cluster TLS protocol. +// +k8s:openapi-gen=true +type ClusterTLSConfig struct { + // server defines the server-side configuration for mutual TLS. + // +required + ServerTLS WebTLSConfig `json:"server"` + // client defines the client-side configuration for mutual TLS. + // +required + ClientTLS SafeTLSConfig `json:"client"` +} + +// URL represents a valid URL +// +kubebuilder:validation:Pattern:="^(http|https)://.+$" +type URL string diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/dns_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/dns_types.go new file mode 100644 index 0000000000..d68b698312 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/dns_types.go @@ -0,0 +1,83 @@ +// Copyright 2024 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package v1 + +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +type PodDNSConfig struct { + // nameservers defines the list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // +optional + // +listType:=set + // +kubebuilder:validation:items:MinLength:=1 + Nameservers []string `json:"nameservers,omitempty"` + + // searches defines the list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // +optional + // +listType:=set + // +kubebuilder:validation:items:MinLength:=1 + Searches []string `json:"searches,omitempty"` + + // options defines the list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + // +listType=map + // +listMapKey=name + Options []PodDNSConfigOption `json:"options,omitempty"` +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +type PodDNSConfigOption struct { + // name is required and must be unique. + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + + // value is optional. + // +optional + Value *string `json:"value,omitempty"` +} + +// DNSPolicy specifies the DNS policy for the pod. 
+// +kubebuilder:validation:Enum=ClusterFirstWithHostNet;ClusterFirst;Default;None +type DNSPolicy string + +const ( + // DNSClusterFirstWithHostNet defines that the pod should use cluster DNS + // first, if it is available, then fall back on the default + // (as determined by kubelet) DNS settings. + DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" + + // DNSClusterFirst defines that the pod should use cluster DNS + // first unless hostNetwork is true, if it is available, then + // fall back on the default (as determined by kubelet) DNS settings. + DNSClusterFirst DNSPolicy = "ClusterFirst" + + // DNSDefault defines that the pod should use the default (as + // determined by kubelet) DNS settings. + DNSDefault DNSPolicy = "Default" + + // DNSNone defines that the pod should use empty DNS settings. DNS + // parameters such as nameservers and search paths should be defined via + // DNSConfig. + DNSNone DNSPolicy = "None" +) + +const ( +// DefaultTerminationGracePeriodSeconds indicates the default duration in +// seconds a pod needs to terminate gracefully. +) diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/doc.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/doc.go new file mode 100644 index 0000000000..64c4725273 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/doc.go @@ -0,0 +1,18 @@ +// Copyright 2017 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package +// +groupName=monitoring.coreos.com + +package v1 diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/http_config.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/http_config.go new file mode 100644 index 0000000000..1590d23e09 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/http_config.go @@ -0,0 +1,207 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "errors" + "fmt" + + v1 "k8s.io/api/core/v1" +) + +// HTTPConfigWithProxyAndTLSFiles defines the configuration for the HTTP client +// with proxy configuration and TLS configuration. It is used for +// ServiceMonitor endpoints. 
+type HTTPConfigWithProxyAndTLSFiles struct { + HTTPConfigWithTLSFiles `json:",inline"` + ProxyConfig `json:",inline"` +} + +// Validate semantically validates the given TLSConfig. +func (c *HTTPConfigWithProxyAndTLSFiles) Validate() error { + if err := c.HTTPConfigWithTLSFiles.Validate(); err != nil { + return err + } + + if err := c.ProxyConfig.Validate(); err != nil { + return err + } + + return nil +} + +// HTTPConfigWithProxy defines the configuration for the HTTP client with proxy +// configuration. It is used for PodMonitor endpoints and Probes. +type HTTPConfigWithProxy struct { + HTTPConfig `json:",inline"` + ProxyConfig `json:",inline"` +} + +// Validate semantically validates the given HTTPConfigWithProxy. +func (hc *HTTPConfigWithProxy) Validate() error { + if hc == nil { + return nil + } + + if err := hc.HTTPConfig.Validate(); err != nil { + return err + } + + return hc.ProxyConfig.Validate() +} + +// HTTPConfigWithoutTLS defines the configuration for the HTTP client. +type HTTPConfigWithoutTLS struct { + // authorization configures the Authorization header credentials used by + // the client. + // + // Cannot be set at the same time as `basicAuth`, `bearerTokenSecret` or `oauth2`. + // + // +optional + Authorization *SafeAuthorization `json:"authorization,omitempty"` + + // basicAuth defines the Basic Authentication credentials used by the + // client. + // + // Cannot be set at the same time as `authorization`, `bearerTokenSecret` or `oauth2`. + // + // +optional + BasicAuth *BasicAuth `json:"basicAuth,omitempty"` + + // oauth2 defines the OAuth2 settings used by the client. + // + // It requires Prometheus >= 2.27.0. + // + // Cannot be set at the same time as `authorization`, `basicAuth` or `bearerTokenSecret`. + // + // +optional + OAuth2 *OAuth2 `json:"oauth2,omitempty"` + + // bearerTokenSecret defines a key of a Secret containing the bearer token + // used by the client for authentication. The secret needs to be in the + // same namespace as the custom resource and readable by the Prometheus + // Operator. + // + // Cannot be set at the same time as `authorization`, `basicAuth` or `oauth2`. + // + // +optional + // + // Deprecated: use `authorization` instead. + BearerTokenSecret *v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` + + // followRedirects defines whether the client should follow HTTP 3xx + // redirects. + // + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + + // enableHttp2 can be used to disable HTTP2. + // + // +optional + EnableHTTP2 *bool `json:"enableHttp2,omitempty"` // nolint:kubeapilinter +} + +// Validate semantically validates the given HTTPConfigWithoutTLS. +func (hc *HTTPConfigWithoutTLS) Validate() error { + if hc == nil { + return nil + } + + // Check duplicate authentication methods. 
+ switch { + case hc.Authorization != nil: + switch { + case hc.BasicAuth != nil: + return errors.New("authorization and basicAuth cannot be configured at the same time") + case hc.BearerTokenSecret != nil: + return errors.New("authorization and bearerTokenSecret cannot be configured at the same time") + case hc.OAuth2 != nil: + return errors.New("authorization and oauth2 cannot be configured at the same time") + } + case hc.BasicAuth != nil: + switch { + case hc.BearerTokenSecret != nil: + return errors.New("basicAuth and bearerTokenSecret cannot be configured at the same time") + case hc.OAuth2 != nil: + return errors.New("basicAuth and oauth2 cannot be configured at the same time") + } + case hc.BearerTokenSecret != nil: + switch { + case hc.OAuth2 != nil: + return errors.New("bearerTokenSecret and oauth2 cannot be configured at the same time") + } + } + + if err := hc.Authorization.Validate(); err != nil { + return fmt.Errorf("authorization: %w", err) + } + + if err := hc.OAuth2.Validate(); err != nil { + return fmt.Errorf("oauth2: %w", err) + } + + return nil +} + +// HTTPConfig defines the HTTP configuration + TLS configuration (only from +// secret/configmap references). +type HTTPConfig struct { + HTTPConfigWithoutTLS `json:",inline"` + + // tlsConfig defines the TLS configuration used by the client. + // + // +optional + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` +} + +// Validate semantically validates the given HTTPConfig. +func (hc *HTTPConfig) Validate() error { + if hc == nil { + return nil + } + + if err := hc.HTTPConfigWithoutTLS.Validate(); err != nil { + return err + } + if err := hc.TLSConfig.Validate(); err != nil { + return fmt.Errorf("tlsConfig: %w", err) + } + + return nil +} + +// HTTPConfigWithTLSFiles defines HTTP configuration + TLS configuration +// (from secret/configmap references as well as files). +type HTTPConfigWithTLSFiles struct { + HTTPConfigWithoutTLS `json:",inline"` + + // tlsConfig defines TLS configuration used by the client. + // + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` +} + +// Validate semantically validates the given HTTPConfigWithTLSFiles. +func (c *HTTPConfigWithTLSFiles) Validate() error { + if err := c.HTTPConfigWithoutTLS.Validate(); err != nil { + return err + } + + if err := c.TLSConfig.Validate(); err != nil { + return fmt.Errorf("tlsConfig: %w", err) + } + + return nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go new file mode 100644 index 0000000000..d7da9bdcaa --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go @@ -0,0 +1,336 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + PodMonitorsKind = "PodMonitor" + PodMonitorName = "podmonitors" + PodMonitorKindKey = "podmonitor" +) + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="pmon" +// +kubebuilder:subresource:status + +// The `PodMonitor` custom resource definition (CRD) defines how `Prometheus` and `PrometheusAgent` can scrape metrics from a group of pods. +// Among other things, it allows to specify: +// * The pods to scrape via label selectors. +// * The container ports to scrape. +// * Authentication credentials to use. +// * Target and metric relabeling. +// +// `Prometheus` and `PrometheusAgent` objects select `PodMonitor` objects using label and namespace selectors. +type PodMonitor struct { + // TypeMeta defines the versioned schema of this representation of an object. + // +optional + metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of desired Pod selection for target discovery by Prometheus. + // +required + Spec PodMonitorSpec `json:"spec"` + // status defines the status subresource. It is under active development and is updated only when the + // "StatusForConfigurationResources" feature gate is enabled. + // + // Most recent observed status of the PodMonitor. Read-only. + // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ConfigResourceStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *PodMonitor) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +func (l *PodMonitor) Bindings() []WorkloadBinding { + return l.Status.Bindings +} + +// PodMonitorSpec contains specification parameters for a PodMonitor. +// +k8s:openapi-gen=true +type PodMonitorSpec struct { + // jobLabel defines the label to use to retrieve the job name from. + // `jobLabel` selects the label from the associated Kubernetes `Pod` + // object which will be used as the `job` label for all metrics. + // + // For example if `jobLabel` is set to `foo` and the Kubernetes `Pod` + // object is labeled with `foo: bar`, then Prometheus adds the `job="bar"` + // label to all ingested metrics. + // + // If the value of this field is empty, the `job` label of the metrics + // defaults to the namespace and name of the PodMonitor object (e.g. `/`). + // +optional + JobLabel string `json:"jobLabel,omitempty"` + + // podTargetLabels defines the labels which are transferred from the + // associated Kubernetes `Pod` object onto the ingested metrics. + // + // +optional + PodTargetLabels []string `json:"podTargetLabels,omitempty"` + + // podMetricsEndpoints defines how to scrape metrics from the selected pods. + // + // +optional + PodMetricsEndpoints []PodMetricsEndpoint `json:"podMetricsEndpoints"` + + // selector defines the label selector to select the Kubernetes `Pod` objects to scrape metrics from. + // +required + Selector metav1.LabelSelector `json:"selector"` + + // selectorMechanism defines the mechanism used to select the endpoints to scrape. + // By default, the selection process relies on relabel configurations to filter the discovered targets. 
+ // Alternatively, you can opt in for role selectors, which may offer better efficiency in large clusters. + // Which strategy is best for your use case needs to be carefully evaluated. + // + // It requires Prometheus >= v2.17.0. + // + // +optional + SelectorMechanism *SelectorMechanism `json:"selectorMechanism,omitempty"` + + // namespaceSelector defines in which namespace(s) Prometheus should discover the pods. + // By default, the pods are discovered in the same namespace as the `PodMonitor` object but it is possible to select pods across different/all namespaces. + // +optional + NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` + + // sampleLimit defines a per-scrape limit on the number of scraped samples + // that will be accepted. + // + // +optional + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + + // targetLimit defines a limit on the number of scraped targets that will + // be accepted. + // + // +optional + TargetLimit *uint64 `json:"targetLimit,omitempty"` + + // scrapeProtocols defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + + // fallbackScrapeProtocol defines the protocol to use if a scrape returns blank, unparseable, or otherwise invalid Content-Type. + // + // It requires Prometheus >= v3.0.0. + // +optional + FallbackScrapeProtocol *ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + + // labelLimit defines the per-scrape limit on number of labels that will be accepted for a sample. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelLimit *uint64 `json:"labelLimit,omitempty"` + // labelNameLengthLimit defines the per-scrape limit on length of labels name that will be accepted for a sample. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + // labelValueLengthLimit defines the per-scrape limit on length of labels value that will be accepted for a sample. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + + NativeHistogramConfig `json:",inline"` + + // keepDroppedTargets defines the per-scrape limit on the number of targets dropped by relabeling + // that will be kept in memory. 0 means no limit. + // + // It requires Prometheus >= v2.47.0. + // + // +optional + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // attachMetadata defines additional metadata which is added to the + // discovered targets. + // + // It requires Prometheus >= v2.35.0. + // + // +optional + AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` + + // scrapeClass defines the scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` + + // bodySizeLimit when defined specifies a job level limit on the size + // of uncompressed response body that will be accepted by Prometheus. + // + // It requires Prometheus >= v2.28.0. + // + // +optional + BodySizeLimit *ByteSize `json:"bodySizeLimit,omitempty"` +} + +// PodMonitorList is a list of PodMonitors. 
+// +k8s:openapi-gen=true
+type PodMonitorList struct {
+ // TypeMeta defines the versioned schema of this representation of an object.
+ // +optional
+ metav1.TypeMeta `json:",inline"`
+ // metadata defines ListMeta as metadata for collection responses.
+ metav1.ListMeta `json:"metadata,omitempty"`
+ // List of PodMonitors
+ Items []PodMonitor `json:"items"`
+}
+
+// DeepCopyObject implements the runtime.Object interface.
+func (l *PodMonitorList) DeepCopyObject() runtime.Object {
+ return l.DeepCopy()
+}
+
+// PodMetricsEndpoint defines an endpoint serving Prometheus metrics to be scraped by
+// Prometheus.
+//
+// +k8s:openapi-gen=true
+type PodMetricsEndpoint struct {
+ // port defines the `Pod` port name which exposes the endpoint.
+ //
+ // If the pod doesn't expose a port with the same name, it will result
+ // in no targets being discovered.
+ //
+ // If a `Pod` has multiple `Port`s with the same name (which is not
+ // recommended), one target instance per unique port number will be
+ // generated.
+ //
+ // It takes precedence over the `portNumber` and `targetPort` fields.
+ // +optional
+ Port *string `json:"port,omitempty"`
+
+ // portNumber defines the `Pod` port number which exposes the endpoint.
+ //
+ // The `Pod` must declare the specified `Port` in its spec or the
+ // target will be dropped by Prometheus.
+ //
+ // This cannot be used to enable scraping of an undeclared port.
+ // To scrape targets on a port which isn't exposed, you need to use
+ // relabeling to override the `__address__` label (but beware of
+ // duplicate targets if the `Pod` has other declared ports).
+ //
+ // In practice, Prometheus will select the targets for which the `portNumber` value
+ // matches the target's __meta_kubernetes_pod_container_port_number label.
+ //
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ // +optional
+ PortNumber *int32 `json:"portNumber,omitempty"`
+
+ // targetPort defines the name or number of the target port of the `Pod` object behind the Service; the
+ // port must be specified with the container port property.
+ //
+ // Deprecated: use 'port' or 'portNumber' instead.
+ // +optional
+ TargetPort *intstr.IntOrString `json:"targetPort,omitempty"`
+
+ // path defines the HTTP path from which to scrape for metrics.
+ //
+ // If empty, Prometheus uses the default value (e.g. `/metrics`).
+ // +optional
+ Path string `json:"path,omitempty"`
+
+ // scheme defines the HTTP scheme to use for scraping.
+ //
+ // +optional
+ Scheme *Scheme `json:"scheme,omitempty"`
+
+ // params defines optional HTTP URL parameters.
+ // +optional
+ //nolint:kubeapilinter
+ Params map[string][]string `json:"params,omitempty"`
+
+ // interval at which Prometheus scrapes the metrics from the target.
+ //
+ // If empty, Prometheus uses the global scrape interval.
+ // +optional
+ Interval Duration `json:"interval,omitempty"`
+
+ // scrapeTimeout defines the timeout after which Prometheus considers the scrape to have failed.
+ //
+ // If empty, Prometheus uses the global scrape timeout unless it is less
+ // than the target's scrape interval value, in which case the latter is used.
+ // The value cannot be greater than the scrape interval otherwise the operator will reject the resource.
+ // +optional
+ ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"`
+
+ // honorLabels when true preserves the metric's labels when they collide
+ // with the target's labels.
+ // +optional + HonorLabels bool `json:"honorLabels,omitempty"` // nolint:kubeapilinter + + // honorTimestamps defines whether Prometheus preserves the timestamps + // when exposed by the target. + // + // +optional + HonorTimestamps *bool `json:"honorTimestamps,omitempty"` // nolint:kubeapilinter + + // trackTimestampsStaleness defines whether Prometheus tracks staleness of + // the metrics that have an explicit timestamp present in scraped data. + // Has no effect if `honorTimestamps` is false. + // + // It requires Prometheus >= v2.48.0. + // + // +optional + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` // nolint:kubeapilinter + + // metricRelabelings defines the relabeling rules to apply to the + // samples before ingestion. + // + // +optional + MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"` + + // relabelings defines the relabeling rules to apply the target's + // metadata labels. + // + // The Operator automatically adds relabelings for a few standard Kubernetes fields. + // + // The original scrape job's name is available via the `__tmp_prometheus_job_name` label. + // + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + // + // +optional + RelabelConfigs []RelabelConfig `json:"relabelings,omitempty"` + + // filterRunning when true, the pods which are not running (e.g. either in Failed or + // Succeeded state) are dropped during the target discovery. + // + // If unset, the filtering is enabled. + // + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase + // + // +optional + FilterRunning *bool `json:"filterRunning,omitempty"` // nolint:kubeapilinter + + HTTPConfigWithProxy `json:",inline"` +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go new file mode 100644 index 0000000000..dc7ad44ee9 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go @@ -0,0 +1,298 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + ProbesKind = "Probe" + ProbeName = "probes" + ProbeKindKey = "probe" +) + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="prb" +// +kubebuilder:subresource:status + +// The `Probe` custom resource definition (CRD) defines how to scrape metrics from prober exporters such as the [blackbox exporter](https://github.com/prometheus/blackbox_exporter). +// +// The `Probe` resource needs 2 pieces of information: +// * The list of probed addresses which can be defined statically or by discovering Kubernetes Ingress objects. 
+// * The prober which exposes the availability of probed endpoints (over various protocols such as HTTP, TCP, ICMP, ...) as Prometheus metrics.
+//
+// `Prometheus` and `PrometheusAgent` objects select `Probe` objects using label and namespace selectors.
+type Probe struct {
+ // TypeMeta defines the versioned schema of this representation of an object.
+ // +optional
+ metav1.TypeMeta `json:",inline"`
+ // metadata defines the standard object metadata that all persisted resources must have.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // spec defines the specification of desired Ingress selection for target discovery by Prometheus.
+ // +required
+ Spec ProbeSpec `json:"spec"`
+ // status defines the status subresource. It is under active development and is updated only when the
+ // "StatusForConfigurationResources" feature gate is enabled.
+ //
+ // Most recent observed status of the Probe. Read-only.
+ // More info:
+ // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Status ConfigResourceStatus `json:"status,omitempty,omitzero"`
+}
+
+// DeepCopyObject implements the runtime.Object interface.
+func (l *Probe) DeepCopyObject() runtime.Object {
+ return l.DeepCopy()
+}
+
+func (l *Probe) Bindings() []WorkloadBinding {
+ return l.Status.Bindings
+}
+
+// ProbeSpec contains specification parameters for a Probe.
+// +k8s:openapi-gen=true
+type ProbeSpec struct {
+ // jobName assigned to scraped metrics by default.
+ // +optional
+ JobName string `json:"jobName,omitempty"`
+
+ // prober defines the specification for the prober to use for probing targets.
+ // The prober.URL parameter is required. Targets cannot be probed if it is left empty.
+ // +optional
+ ProberSpec ProberSpec `json:"prober,omitempty"`
+
+ // module to use for probing, specifying how to probe the target.
+ // Example module configuration in the blackbox exporter:
+ // https://github.com/prometheus/blackbox_exporter/blob/master/example.yml
+ // +optional
+ Module string `json:"module,omitempty"`
+ // targets defines a set of static or dynamically discovered targets to probe.
+ // +optional
+ Targets ProbeTargets `json:"targets,omitempty"`
+ // interval at which targets are probed using the configured prober.
+ // If not specified, Prometheus' global scrape interval is used.
+ // +optional
+ Interval Duration `json:"interval,omitempty"`
+ // scrapeTimeout defines the timeout for scraping metrics from the Prometheus exporter.
+ // If not specified, the Prometheus global scrape timeout is used.
+ // The value cannot be greater than the scrape interval otherwise the operator will reject the resource.
+ // +optional
+ ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"`
+
+ // metricRelabelings defines the RelabelConfig to apply to samples before ingestion.
+ // +optional
+ MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"`
+ // authorization defines the authorization section for this endpoint.
+ // +optional
+ Authorization *SafeAuthorization `json:"authorization,omitempty"`
+ // sampleLimit defines a per-scrape limit on the number of scraped samples that will be accepted.
+ // +optional
+ SampleLimit *uint64 `json:"sampleLimit,omitempty"`
+ // targetLimit defines a limit on the number of scraped targets that will be accepted.
+ // +optional
+ TargetLimit *uint64 `json:"targetLimit,omitempty"`
+ // scrapeProtocols defines the protocols to negotiate during a scrape.
It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + + // fallbackScrapeProtocol defines the protocol to use if a scrape returns blank, unparseable, or otherwise invalid Content-Type. + // + // It requires Prometheus >= v3.0.0. + // +optional + FallbackScrapeProtocol *ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + + // labelLimit defines the per-scrape limit on number of labels that will be accepted for a sample. + // Only valid in Prometheus versions 2.27.0 and newer. + // +optional + LabelLimit *uint64 `json:"labelLimit,omitempty"` + + // labelNameLengthLimit defines the per-scrape limit on length of labels name that will be accepted for a sample. + // Only valid in Prometheus versions 2.27.0 and newer. + // +optional + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + + // labelValueLengthLimit defines the per-scrape limit on length of labels value that will be accepted for a sample. + // Only valid in Prometheus versions 2.27.0 and newer. + // +optional + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + + // +optional + NativeHistogramConfig `json:",inline"` + + // keepDroppedTargets defines the per-scrape limit on the number of targets dropped by relabeling + // that will be kept in memory. 0 means no limit. + // + // It requires Prometheus >= v2.47.0. + // + // +optional + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // scrapeClass defines the scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` + + // params defines the list of HTTP query parameters for the scrape. + // Please note that the `.spec.module` field takes precedence over the `module` parameter from this list when both are defined. + // The module name must be added using Module under ProbeSpec. + // +optional + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=name + Params []ProbeParam `json:"params,omitempty"` + + HTTPConfig `json:",inline"` +} + +// ProbeParam defines specification of extra parameters for a Probe. +// +k8s:openapi-gen=true +type ProbeParam struct { + // name defines the parameter name + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name,omitempty"` + // values defines the parameter values + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:items:MinLength=1 + // +optional + Values []string `json:"values,omitempty"` +} + +// ProbeTargets defines how to discover the probed targets. +// One of the `staticConfig` or `ingress` must be defined. +// If both are defined, `staticConfig` takes precedence. +// +k8s:openapi-gen=true +type ProbeTargets struct { + // staticConfig defines the static list of targets to probe and the + // relabeling configuration. + // If `ingress` is also defined, `staticConfig` takes precedence. + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config. + // +optional + StaticConfig *ProbeTargetStaticConfig `json:"staticConfig,omitempty"` + // ingress defines the Ingress objects to probe and the relabeling + // configuration. + // If `staticConfig` is also defined, `staticConfig` takes precedence. 
+ // +optional
+ Ingress *ProbeTargetIngress `json:"ingress,omitempty"`
+}
+
+// Validate semantically validates the given ProbeTargets.
+func (it *ProbeTargets) Validate() error {
+ if it.StaticConfig == nil && it.Ingress == nil {
+ return errors.New("at least one of .spec.targets.staticConfig and .spec.targets.ingress is required")
+ }
+
+ return nil
+}
+
+// ProbeTargetStaticConfig defines the set of static targets considered for probing.
+// +k8s:openapi-gen=true
+type ProbeTargetStaticConfig struct {
+ // static defines the list of hosts to probe.
+ // +optional
+ Targets []string `json:"static,omitempty"`
+ // labels defines all labels assigned to all metrics scraped from the targets.
+ // +optional
+ //nolint:kubeapilinter
+ Labels map[string]string `json:"labels,omitempty"`
+ // relabelingConfigs defines relabelings to be applied to the label set of the targets before they get
+ // scraped.
+ // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ // +optional
+ RelabelConfigs []RelabelConfig `json:"relabelingConfigs,omitempty"`
+}
+
+// ProbeTargetIngress defines the set of Ingress objects considered for probing.
+// The operator configures a target for each host/path combination of each ingress object.
+// +k8s:openapi-gen=true
+type ProbeTargetIngress struct {
+ // selector to select the Ingress objects.
+ // +optional
+ Selector metav1.LabelSelector `json:"selector,omitempty"`
+ // namespaceSelector defines from which namespaces to select Ingress objects.
+ // +optional
+ NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"`
+ // relabelingConfigs to apply to the label set of the target before it gets
+ // scraped.
+ // The original ingress address is available via the
+ // `__tmp_prometheus_ingress_address` label. It can be used to customize the
+ // probed URL.
+ // The original scrape job's name is available via the `__tmp_prometheus_job_name` label.
+ // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ // +optional
+ RelabelConfigs []RelabelConfig `json:"relabelingConfigs,omitempty"`
+}
+
+// ProberSpec contains specification parameters for the Prober used for probing.
+// +k8s:openapi-gen=true
+type ProberSpec struct {
+ // url defines the address of the prober.
+ //
+ // Unlike what the name indicates, the value should be in the form of
+ // `address:port` without any scheme; the scheme should be specified in the
+ // `scheme` field.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +required
+ URL string `json:"url"`
+
+ // scheme defines the HTTP scheme to use when scraping the prober.
+ //
+ // +optional
+ Scheme *Scheme `json:"scheme,omitempty"`
+
+ // path to collect metrics from.
+ // Defaults to `/probe`.
+ //
+ // +kubebuilder:default:="/probe"
+ // +optional
+ Path string `json:"path,omitempty"`
+
+ // +optional
+ ProxyConfig `json:",inline"`
+}
+
+// ProbeList is a list of Probes.
+// +k8s:openapi-gen=true
+type ProbeList struct {
+ // TypeMeta defines the versioned schema of this representation of an object.
+ // +optional
+ metav1.TypeMeta `json:",inline"`
+ // metadata defines ListMeta as metadata for collection responses.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ // List of Probes
+ // +required
+ Items []Probe `json:"items"`
+}
+
+// DeepCopyObject implements the runtime.Object interface.
+func (l *ProbeList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go new file mode 100644 index 0000000000..812f6efbc8 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go @@ -0,0 +1,2593 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "errors" + "fmt" + "strings" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + PrometheusesKind = "Prometheus" + PrometheusName = "prometheuses" + PrometheusKindKey = "prometheus" +) + +// ScrapeProtocol represents a protocol used by Prometheus for scraping metrics. +// Supported values are: +// * `OpenMetricsText0.0.1` +// * `OpenMetricsText1.0.0` +// * `PrometheusProto` +// * `PrometheusText0.0.4` +// * `PrometheusText1.0.0` +// +kubebuilder:validation:Enum=PrometheusProto;OpenMetricsText0.0.1;OpenMetricsText1.0.0;PrometheusText0.0.4;PrometheusText1.0.0 +type ScrapeProtocol string + +const ( + PrometheusProto ScrapeProtocol = "PrometheusProto" + PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4" + PrometheusText1_0_0 ScrapeProtocol = "PrometheusText1.0.0" + OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1" + OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0" +) + +// RuntimeConfig configures the values for the process behavior. +type RuntimeConfig struct { + // goGC defines the Go garbage collection target percentage. Lowering this number may increase the CPU usage. + // See: https://tip.golang.org/doc/gc-guide#GOGC + // +optional + // +kubebuilder:validation:Minimum=-1 + GoGC *int32 `json:"goGC,omitempty"` +} + +// PrometheusInterface is used by Prometheus and PrometheusAgent to share common methods, e.g. config generation. 
+// +k8s:deepcopy-gen=false +type PrometheusInterface interface { + metav1.ObjectMetaAccessor + schema.ObjectKind + + GetCommonPrometheusFields() CommonPrometheusFields + SetCommonPrometheusFields(CommonPrometheusFields) + + GetStatus() PrometheusStatus +} + +var _ = PrometheusInterface(&Prometheus{}) + +func (l *Prometheus) GetCommonPrometheusFields() CommonPrometheusFields { + return l.Spec.CommonPrometheusFields +} + +func (l *Prometheus) SetCommonPrometheusFields(f CommonPrometheusFields) { + l.Spec.CommonPrometheusFields = f +} + +func (l *Prometheus) GetStatus() PrometheusStatus { + return l.Status +} + +// +kubebuilder:validation:Enum=OnResource;OnShard +type AdditionalLabelSelectors string + +const ( + // Automatically add a label selector that will select all pods matching the same Prometheus/PrometheusAgent resource (irrespective of their shards). + ResourceNameLabelSelector AdditionalLabelSelectors = "OnResource" + + // Automatically add a label selector that will select all pods matching the same shard. + ShardAndResourceNameLabelSelector AdditionalLabelSelectors = "OnShard" +) + +type CoreV1TopologySpreadConstraint v1.TopologySpreadConstraint + +type TopologySpreadConstraint struct { + CoreV1TopologySpreadConstraint `json:",inline"` + + // additionalLabelSelectors Defines what Prometheus Operator managed labels should be added to labelSelector on the topologySpreadConstraint. + // +optional + AdditionalLabelSelectors *AdditionalLabelSelectors `json:"additionalLabelSelectors,omitempty"` +} + +// +kubebuilder:validation:MinLength:=1 +type EnableFeature string + +// CommonPrometheusFields are the options available to both the Prometheus server and agent. +// +k8s:deepcopy-gen=true +type CommonPrometheusFields struct { + // podMetadata defines labels and annotations which are propagated to the Prometheus pods. + // + // The following items are reserved and cannot be overridden: + // * "prometheus" label, set to the name of the Prometheus object. + // * "app.kubernetes.io/instance" label, set to the name of the Prometheus object. + // * "app.kubernetes.io/managed-by" label, set to "prometheus-operator". + // * "app.kubernetes.io/name" label, set to "prometheus". + // * "app.kubernetes.io/version" label, set to the Prometheus version. + // * "operator.prometheus.io/name" label, set to the name of the Prometheus object. + // * "operator.prometheus.io/shard" label, set to the shard number of the Prometheus object. + // * "kubectl.kubernetes.io/default-container" annotation, set to "prometheus". + // +optional + PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` + + // serviceMonitorSelector defines the serviceMonitors to be selected for target discovery. An empty label + // selector matches all objects. A null label selector matches no objects. + // + // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` + // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. + // The Prometheus operator will ensure that the Prometheus configuration's + // Secret exists, but it is the responsibility of the user to provide the raw + // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. + // This behavior is *deprecated* and will be removed in the next major version + // of the custom resource definition. It is recommended to use + // `spec.additionalScrapeConfigs` instead. 
+ // +optional + ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"` + // serviceMonitorNamespaceSelector defines the namespaces to match for ServicedMonitors discovery. An empty label selector + // matches all namespaces. A null label selector (default value) matches the current + // namespace only. + // +optional + ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"` + + // podMonitorSelector defines the podMonitors to be selected for target discovery. An empty label selector + // matches all objects. A null label selector matches no objects. + // + // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` + // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. + // The Prometheus operator will ensure that the Prometheus configuration's + // Secret exists, but it is the responsibility of the user to provide the raw + // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. + // This behavior is *deprecated* and will be removed in the next major version + // of the custom resource definition. It is recommended to use + // `spec.additionalScrapeConfigs` instead. + // +optional + PodMonitorSelector *metav1.LabelSelector `json:"podMonitorSelector,omitempty"` + // podMonitorNamespaceSelector defines the namespaces to match for PodMonitors discovery. An empty label selector + // matches all namespaces. A null label selector (default value) matches the current + // namespace only. + // +optional + PodMonitorNamespaceSelector *metav1.LabelSelector `json:"podMonitorNamespaceSelector,omitempty"` + + // probeSelector defines the probes to be selected for target discovery. An empty label selector + // matches all objects. A null label selector matches no objects. + // + // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` + // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. + // The Prometheus operator will ensure that the Prometheus configuration's + // Secret exists, but it is the responsibility of the user to provide the raw + // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. + // This behavior is *deprecated* and will be removed in the next major version + // of the custom resource definition. It is recommended to use + // `spec.additionalScrapeConfigs` instead. + // +optional + ProbeSelector *metav1.LabelSelector `json:"probeSelector,omitempty"` + // probeNamespaceSelector defines the namespaces to match for Probe discovery. An empty label + // selector matches all namespaces. A null label selector matches the + // current namespace only. + // +optional + ProbeNamespaceSelector *metav1.LabelSelector `json:"probeNamespaceSelector,omitempty"` + + // scrapeConfigSelector defines the scrapeConfigs to be selected for target discovery. An empty label + // selector matches all objects. A null label selector matches no objects. + // + // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` + // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. + // The Prometheus operator will ensure that the Prometheus configuration's + // Secret exists, but it is the responsibility of the user to provide the raw + // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. + // This behavior is *deprecated* and will be removed in the next major version + // of the custom resource definition. 
It is recommended to use + // `spec.additionalScrapeConfigs` instead. + // + // Note that the ScrapeConfig custom resource definition is currently at Alpha level. + // + // +optional + ScrapeConfigSelector *metav1.LabelSelector `json:"scrapeConfigSelector,omitempty"` + // scrapeConfigNamespaceSelector defines the namespaces to match for ScrapeConfig discovery. An empty label selector + // matches all namespaces. A null label selector matches the current + // namespace only. + // + // Note that the ScrapeConfig custom resource definition is currently at Alpha level. + // + // +optional + ScrapeConfigNamespaceSelector *metav1.LabelSelector `json:"scrapeConfigNamespaceSelector,omitempty"` + + // version of Prometheus being deployed. The operator uses this information + // to generate the Prometheus StatefulSet + configuration files. + // + // If not specified, the operator assumes the latest upstream version of + // Prometheus available at the time when the version of the operator was + // released. + // +optional + Version string `json:"version,omitempty"` + + // paused defines when a Prometheus deployment is paused, no actions except for deletion + // will be performed on the underlying objects. + // +optional + Paused bool `json:"paused,omitempty"` // nolint:kubeapilinter + + // image defines the container image name for Prometheus. If specified, it takes precedence + // over the `spec.baseImage`, `spec.tag` and `spec.sha` fields. + // + // Specifying `spec.version` is still necessary to ensure the Prometheus + // Operator knows which version of Prometheus is being configured. + // + // If neither `spec.image` nor `spec.baseImage` are defined, the operator + // will use the latest upstream version of Prometheus available at the time + // when the operator was released. + // + // +optional + Image *string `json:"image,omitempty"` + // imagePullPolicy defines the image pull policy for the 'prometheus', 'init-config-reloader' and 'config-reloader' containers. + // See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy for more details. + // +kubebuilder:validation:Enum="";Always;Never;IfNotPresent + // +optional + ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"` + // imagePullSecrets defines an optional list of references to Secrets in the same namespace + // to use for pulling images from registries. + // See http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + // +optional + ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + + // replicas defines the number of replicas of each shard to deploy for a Prometheus deployment. + // `spec.replicas` multiplied by `spec.shards` is the total number of Pods + // created. + // + // Default: 1 + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // shards defines the number of shards to distribute the scraped targets onto. + // + // `spec.replicas` multiplied by `spec.shards` is the total number of Pods + // being created. + // + // When not defined, the operator assumes only one shard. + // + // Note that scaling down shards will not reshard data onto the remaining + // instances, it must be manually moved. Increasing shards will not reshard + // data either but it will continue to be available from the same + // instances. To query globally, use either + // * Thanos sidecar + querier for query federation and Thanos Ruler for rules. + // * Remote-write to send metrics to a central location. 
+ //
+ // By default, the sharding of targets is performed on:
+ // * The `__address__` target's metadata label for PodMonitor,
+ // ServiceMonitor and ScrapeConfig resources.
+ // * The `__param_target__` label for Probe resources.
+ //
+ // Users can define their own sharding implementation by setting the
+ // `__tmp_hash` label during the target discovery with relabeling
+ // configuration (either in the monitoring resources or via scrape class).
+ //
+ // You can also disable sharding on a specific target by setting the
+ // `__tmp_disable_sharding` label with relabeling configuration. When
+ // the label value isn't empty, all Prometheus shards will scrape the target.
+ // +optional
+ Shards *int32 `json:"shards,omitempty"`
+
+ // replicaExternalLabelName defines the name of the Prometheus external label used to denote the replica name.
+ // The external label will _not_ be added when the field is set to the
+ // empty string (`""`).
+ //
+ // Default: "prometheus_replica"
+ // +optional
+ ReplicaExternalLabelName *string `json:"replicaExternalLabelName,omitempty"`
+ // prometheusExternalLabelName defines the name of the Prometheus external label used to denote the Prometheus instance
+ // name. The external label will _not_ be added when the field is set to
+ // the empty string (`""`).
+ //
+ // Default: "prometheus"
+ // +optional
+ PrometheusExternalLabelName *string `json:"prometheusExternalLabelName,omitempty"`
+
+ // logLevel defines the log level for Prometheus and the config-reloader sidecar.
+ // +kubebuilder:validation:Enum="";debug;info;warn;error
+ // +optional
+ LogLevel string `json:"logLevel,omitempty"`
+ // logFormat defines the log format for Prometheus and the config-reloader sidecar.
+ // +kubebuilder:validation:Enum="";logfmt;json
+ // +optional
+ LogFormat string `json:"logFormat,omitempty"`
+
+ // scrapeInterval defines the interval between consecutive scrapes.
+ //
+ // Default: "30s"
+ // +kubebuilder:default:="30s"
+ // +optional
+ ScrapeInterval Duration `json:"scrapeInterval,omitempty"`
+ // scrapeTimeout defines the number of seconds to wait until a scrape request times out.
+ // The value cannot be greater than the scrape interval otherwise the operator will reject the resource.
+ // +optional
+ ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"`
+
+ // scrapeProtocols defines the protocols to negotiate during a scrape. It tells clients the
+ // protocols supported by Prometheus in order of preference (from most to least preferred).
+ //
+ // If unset, Prometheus uses its default value.
+ //
+ // It requires Prometheus >= v2.49.0.
+ //
+ // `PrometheusText1.0.0` requires Prometheus >= v3.0.0.
+ //
+ // +listType=set
+ // +optional
+ ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"`
+
+ // externalLabels defines the labels to add to any time series or alerts when communicating with
+ // external systems (federation, remote storage, Alertmanager).
+ // Labels defined by `spec.replicaExternalLabelName` and
+ // `spec.prometheusExternalLabelName` take precedence over this list.
+ // +optional
+ //nolint:kubeapilinter
+ ExternalLabels map[string]string `json:"externalLabels,omitempty"`
+
+ // enableRemoteWriteReceiver enables Prometheus to be used as a receiver for the Prometheus remote
+ // write protocol.
+ //
+ // WARNING: This is not considered an efficient way of ingesting samples.
+ // Use it with caution for specific low-volume use cases.
+ // It is not suitable for replacing the ingestion via scraping and turning
+ // Prometheus into a push-based metrics collection system.
+ // For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#remote-write-receiver
+ //
+ // It requires Prometheus >= v2.33.0.
+ // +optional
+ EnableRemoteWriteReceiver bool `json:"enableRemoteWriteReceiver,omitempty"` // nolint:kubeapilinter
+
+ // enableOTLPReceiver enables Prometheus to be used as a receiver for the OTLP Metrics protocol.
+ //
+ // Note that the OTLP receiver endpoint is automatically enabled if `.spec.otlpConfig` is defined.
+ //
+ // It requires Prometheus >= v2.47.0.
+ // +optional
+ EnableOTLPReceiver *bool `json:"enableOTLPReceiver,omitempty"` // nolint:kubeapilinter
+
+ // remoteWriteReceiverMessageVersions defines the list of the protobuf message versions to accept when receiving the
+ // remote writes.
+ //
+ // It requires Prometheus >= v2.54.0.
+ //
+ // +kubebuilder:validation:MinItems=1
+ // +listType:=set
+ // +optional
+ RemoteWriteReceiverMessageVersions []RemoteWriteMessageVersion `json:"remoteWriteReceiverMessageVersions,omitempty"`
+
+ // enableFeatures enables access to Prometheus feature flags. By default, no features are enabled.
+ //
+ // Enabling features which are disabled by default is entirely outside the
+ // scope of what the maintainers will support and by doing so, you accept
+ // that this behaviour may break at any time without notice.
+ //
+ // For more information see https://prometheus.io/docs/prometheus/latest/feature_flags/
+ //
+ // +listType:=set
+ // +optional
+ EnableFeatures []EnableFeature `json:"enableFeatures,omitempty"`
+
+ // externalUrl defines the external URL under which the Prometheus service is externally
+ // available. This is necessary to generate correct URLs (for instance if
+ // Prometheus is accessible behind an Ingress resource).
+ // +optional
+ ExternalURL string `json:"externalUrl,omitempty"`
+ // routePrefix defines the route prefix Prometheus registers HTTP handlers for.
+ //
+ // This is useful when using `spec.externalURL`, and a proxy is rewriting
+ // HTTP routes of a request, and the actual ExternalURL is still true, but
+ // the server serves requests under a different route prefix. For example
+ // for use with `kubectl proxy`.
+ // +optional
+ RoutePrefix string `json:"routePrefix,omitempty"`
+
+ // storage defines the storage used by Prometheus.
+ // +optional
+ Storage *StorageSpec `json:"storage,omitempty"`
+
+ // volumes allows the configuration of additional volumes on the output
+ // StatefulSet definition. Volumes specified will be appended to other
+ // volumes that are generated as a result of StorageSpec objects.
+ // +optional
+ Volumes []v1.Volume `json:"volumes,omitempty"`
+ // volumeMounts allows the configuration of additional VolumeMounts.
+ //
+ // VolumeMounts will be appended to other VolumeMounts in the 'prometheus'
+ // container, that are generated as a result of StorageSpec objects.
+ // +optional
+ VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"`
+
+ // persistentVolumeClaimRetentionPolicy controls if and how PVCs are deleted during the lifecycle of a StatefulSet.
+ // The default behavior is that all PVCs are retained.
+ // This is an alpha field from kubernetes 1.23 until 1.26 and a beta field from 1.26.
+ // It requires enabling the StatefulSetAutoDeletePVC feature gate.
+ // + // +optional + PersistentVolumeClaimRetentionPolicy *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + + // web defines the configuration of the Prometheus web server. + // +optional + Web *PrometheusWebSpec `json:"web,omitempty"` + + // resources defines the resources requests and limits of the 'prometheus' container. + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + + // nodeSelector defines on which Nodes the Pods are scheduled. + // +optional + //nolint:kubeapilinter + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // serviceAccountName is the name of the ServiceAccount to use to run the + // Prometheus Pods. + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // automountServiceAccountToken defines whether a service account token should be automatically mounted in the pod. + // If the field isn't set, the operator mounts the service account token by default. + // + // **Warning:** be aware that by default, Prometheus requires the service account token for Kubernetes service discovery. + // It is possible to use strategic merge patch to project the service account token into the 'prometheus' container. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` // nolint:kubeapilinter + + // secrets defines a list of Secrets in the same namespace as the Prometheus + // object, which shall be mounted into the Prometheus Pods. + // Each Secret is added to the StatefulSet definition as a volume named `secret-`. + // The Secrets are mounted into /etc/prometheus/secrets/ in the 'prometheus' container. + // +listType:=set + // +optional + Secrets []string `json:"secrets,omitempty"` + // configMaps defines a list of ConfigMaps in the same namespace as the Prometheus + // object, which shall be mounted into the Prometheus Pods. + // Each ConfigMap is added to the StatefulSet definition as a volume named `configmap-`. + // The ConfigMaps are mounted into /etc/prometheus/configmaps/ in the 'prometheus' container. + // +optional + ConfigMaps []string `json:"configMaps,omitempty"` + + // affinity defines the Pods' affinity scheduling rules if specified. + // +optional + Affinity *v1.Affinity `json:"affinity,omitempty"` + // tolerations defines the Pods' tolerations if specified. + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + + // topologySpreadConstraints defines the pod's topology spread constraints if specified. + // +optional + TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + + // remoteWrite defines the list of remote write configurations. + // +optional + RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"` + + // otlp defines the settings related to the OTLP receiver feature. + // It requires Prometheus >= v2.55.0. + // + // +optional + OTLP *OTLPConfig `json:"otlp,omitempty"` + + // securityContext holds pod-level security attributes and common container settings. + // This defaults to the default PodSecurityContext. + // +optional + SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` + + // dnsPolicy defines the DNS policy for the pods. + // + // +optional + DNSPolicy *DNSPolicy `json:"dnsPolicy,omitempty"` + // dnsConfig defines the DNS configuration for the pods. 
+ // + // +optional + DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty"` + // listenLocal when true, the Prometheus server listens on the loopback address + // instead of the Pod IP's address. + // + // +optional + ListenLocal bool `json:"listenLocal,omitempty"` // nolint:kubeapilinter + + // podManagementPolicy defines the policy for creating/deleting pods when + // scaling up and down. + // + // Unlike the default StatefulSet behavior, the default policy is + // `Parallel` to avoid manual intervention in case a pod gets stuck during + // a rollout. + // + // Note that updating this value implies the recreation of the StatefulSet + // which incurs a service outage. + // + // +optional + PodManagementPolicy *PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + + // updateStrategy indicates the strategy that will be employed to update + // Pods in the StatefulSet when a revision is made to statefulset's Pod + // Template. + // + // The default strategy is RollingUpdate. + // + // +optional + UpdateStrategy *StatefulSetUpdateStrategy `json:"updateStrategy,omitempty"` + + // enableServiceLinks defines whether information about services should be injected into pod's environment variables + // +optional + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` // nolint:kubeapilinter + + // containers allows injecting additional containers or modifying operator + // generated containers. This can be used to allow adding an authentication + // proxy to the Pods or to change the behavior of an operator generated + // container. Containers described here modify an operator generated + // container if they share the same name and modifications are done via a + // strategic merge patch. + // + // The names of containers managed by the operator are: + // * `prometheus` + // * `config-reloader` + // * `thanos-sidecar` + // + // Overriding containers is entirely outside the scope of what the + // maintainers will support and by doing so, you accept that this behaviour + // may break at any time without notice. + // +optional + Containers []v1.Container `json:"containers,omitempty"` + // initContainers allows injecting initContainers to the Pod definition. Those + // can be used to e.g. fetch secrets for injection into the Prometheus + // configuration from external sources. Any errors during the execution of + // an initContainer will lead to a restart of the Pod. More info: + // https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // InitContainers described here modify an operator generated init + // containers if they share the same name and modifications are done via a + // strategic merge patch. + // + // The names of init container name managed by the operator are: + // * `init-config-reloader`. + // + // Overriding init containers is entirely outside the scope of what the + // maintainers will support and by doing so, you accept that this behaviour + // may break at any time without notice. + // +optional + InitContainers []v1.Container `json:"initContainers,omitempty"` + + // additionalScrapeConfigs allows specifying a key of a Secret containing + // additional Prometheus scrape configurations. Scrape configurations + // specified are appended to the configurations generated by the Prometheus + // Operator. Job configurations specified must have the form as specified + // in the official Prometheus documentation: + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. 
+ // As scrape configs are appended, the user is responsible for making sure they
+ // are valid. Note that using this feature may expose the possibility to
+ // break upgrades of Prometheus. It is advised to review Prometheus release
+ // notes to ensure that no incompatible scrape configs are going to break
+ // Prometheus after the upgrade.
+ // +optional
+ AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"`
+
+ // apiserverConfig allows specifying a host and auth methods to access the
+ // Kubernetes API server.
+ // If null, Prometheus is assumed to run inside of the cluster: it will
+ // discover the API servers automatically and use the Pod's CA certificate
+ // and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.
+ // +optional
+ APIServerConfig *APIServerConfig `json:"apiserverConfig,omitempty"`
+
+ // priorityClassName assigned to the Pods.
+ // +optional
+ PriorityClassName string `json:"priorityClassName,omitempty"`
+ // portName used for the pods and governing service.
+ // Default: "web"
+ // +kubebuilder:default:="web"
+ // +optional
+ PortName string `json:"portName,omitempty"`
+
+ // arbitraryFSAccessThroughSMs when true, ServiceMonitor, PodMonitor and Probe objects are forbidden to
+ // reference arbitrary files on the file system of the 'prometheus'
+ // container.
+ // When a ServiceMonitor's endpoint specifies a `bearerTokenFile` value
+ // (e.g. '/var/run/secrets/kubernetes.io/serviceaccount/token'), a
+ // malicious target can get access to the Prometheus service account's
+ // token in the Prometheus' scrape request. Setting
+ // `spec.arbitraryFSAccessThroughSM` to 'true' would prevent the attack.
+ // Users should instead provide the credentials using the
+ // `spec.bearerTokenSecret` field.
+ // +optional
+ ArbitraryFSAccessThroughSMs ArbitraryFSAccessThroughSMsConfig `json:"arbitraryFSAccessThroughSMs,omitempty"`
+
+ // overrideHonorLabels when true, Prometheus resolves label conflicts by renaming the labels in the scraped data
+ // to “exported_<label value>” for all targets created from ServiceMonitor, PodMonitor and
+ // ScrapeConfig objects. Otherwise the HonorLabels field of the service or pod monitor applies.
+ // In practice, `OverrideHonorLabels:true` enforces `honorLabels:false`
+ // for all ServiceMonitor, PodMonitor and ScrapeConfig objects.
+ // +optional
+ OverrideHonorLabels bool `json:"overrideHonorLabels,omitempty"` // nolint:kubeapilinter
+ // overrideHonorTimestamps when true, Prometheus ignores the timestamps for all the targets created
+ // from service and pod monitors.
+ // Otherwise the HonorTimestamps field of the service or pod monitor applies.
+ // +optional
+ OverrideHonorTimestamps bool `json:"overrideHonorTimestamps,omitempty"` // nolint:kubeapilinter
+
+ // ignoreNamespaceSelectors when true, `spec.namespaceSelector` from all PodMonitor, ServiceMonitor
+ // and Probe objects will be ignored. They will only discover targets
+ // within the namespace of the PodMonitor, ServiceMonitor and Probe
+ // object.
+ // +optional
+ IgnoreNamespaceSelectors bool `json:"ignoreNamespaceSelectors,omitempty"` // nolint:kubeapilinter
+
+ // enforcedNamespaceLabel when not empty, a label will be added to:
+ //
+ // 1. All metrics scraped from `ServiceMonitor`, `PodMonitor`, `Probe` and `ScrapeConfig` objects.
+ // 2. All metrics generated from recording rules defined in `PrometheusRule` objects.
+ // 3. All alerts generated from alerting rules defined in `PrometheusRule` objects.
+ // 4. All vector selectors of PromQL expressions defined in `PrometheusRule` objects.
+ //
+ // The label will not be added for objects referenced in `spec.excludedFromEnforcement`.
+ //
+ // The label's name is this field's value.
+ // The label's value is the namespace of the `ServiceMonitor`,
+ // `PodMonitor`, `Probe`, `PrometheusRule` or `ScrapeConfig` object.
+ // +optional
+ EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"`
+
+ // enforcedSampleLimit when defined specifies a global limit on the number
+ // of scraped samples that will be accepted. This overrides any
+ // `spec.sampleLimit` set by ServiceMonitor, PodMonitor, Probe objects
+ // unless `spec.sampleLimit` is greater than zero and less than
+ // `spec.enforcedSampleLimit`.
+ //
+ // It is meant to be used by admins to keep the overall number of
+ // samples/series under a desired limit.
+ //
+ // When both `enforcedSampleLimit` and `sampleLimit` are defined and greater than zero, the following rules apply:
+ // * Scrape objects without a defined sampleLimit value will inherit the global sampleLimit value (Prometheus >= 2.45.0) or the enforcedSampleLimit value (Prometheus < v2.45.0).
+ // If Prometheus version is >= 2.45.0 and the `enforcedSampleLimit` is greater than the `sampleLimit`, the `sampleLimit` will be set to `enforcedSampleLimit`.
+ // * Scrape objects with a sampleLimit value less than or equal to enforcedSampleLimit keep their specific value.
+ // * Scrape objects with a sampleLimit value greater than enforcedSampleLimit are set to enforcedSampleLimit.
+ //
+ //
+ // +optional
+ EnforcedSampleLimit *uint64 `json:"enforcedSampleLimit,omitempty"`
+ // enforcedTargetLimit when defined specifies a global limit on the number
+ // of scraped targets. The value overrides any `spec.targetLimit` set by
+ // ServiceMonitor, PodMonitor, Probe objects unless `spec.targetLimit` is
+ // greater than zero and less than `spec.enforcedTargetLimit`.
+ //
+ // It is meant to be used by admins to keep the overall number of
+ // targets under a desired limit.
+ //
+ // When both `enforcedTargetLimit` and `targetLimit` are defined and greater than zero, the following rules apply:
+ // * Scrape objects without a defined targetLimit value will inherit the global targetLimit value (Prometheus >= 2.45.0) or the enforcedTargetLimit value (Prometheus < v2.45.0).
+ // If Prometheus version is >= 2.45.0 and the `enforcedTargetLimit` is greater than the `targetLimit`, the `targetLimit` will be set to `enforcedTargetLimit`.
+ // * Scrape objects with a targetLimit value less than or equal to enforcedTargetLimit keep their specific value.
+ // * Scrape objects with a targetLimit value greater than enforcedTargetLimit are set to enforcedTargetLimit.
+ //
+ //
+ // +optional
+ EnforcedTargetLimit *uint64 `json:"enforcedTargetLimit,omitempty"`
+ // enforcedLabelLimit when defined specifies a global limit on the number
+ // of labels per sample. The value overrides any `spec.labelLimit` set by
+ // ServiceMonitor, PodMonitor, Probe objects unless `spec.labelLimit` is
+ // greater than zero and less than `spec.enforcedLabelLimit`.
+ //
+ // It requires Prometheus >= v2.27.0.
+ //
+ // When both `enforcedLabelLimit` and `labelLimit` are defined and greater than zero, the following rules apply:
+ // * Scrape objects without a defined labelLimit value will inherit the global labelLimit value (Prometheus >= 2.45.0) or the enforcedLabelLimit value (Prometheus < v2.45.0).
+ // If Prometheus version is >= 2.45.0 and the `enforcedLabelLimit` is greater than the `labelLimit`, the `labelLimit` will be set to `enforcedLabelLimit`. + // * Scrape objects with a labelLimit value less than or equal to enforcedLabelLimit keep their specific value. + // * Scrape objects with a labelLimit value greater than enforcedLabelLimit are set to enforcedLabelLimit. + // + // + // +optional + EnforcedLabelLimit *uint64 `json:"enforcedLabelLimit,omitempty"` + // enforcedLabelNameLengthLimit when defined specifies a global limit on the length + // of labels name per sample. The value overrides any `spec.labelNameLengthLimit` set by + // ServiceMonitor, PodMonitor, Probe objects unless `spec.labelNameLengthLimit` is + // greater than zero and less than `spec.enforcedLabelNameLengthLimit`. + // + // It requires Prometheus >= v2.27.0. + // + // When both `enforcedLabelNameLengthLimit` and `labelNameLengthLimit` are defined and greater than zero, the following rules apply: + // * Scrape objects without a defined labelNameLengthLimit value will inherit the global labelNameLengthLimit value (Prometheus >= 2.45.0) or the enforcedLabelNameLengthLimit value (Prometheus < v2.45.0). + // If Prometheus version is >= 2.45.0 and the `enforcedLabelNameLengthLimit` is greater than the `labelNameLengthLimit`, the `labelNameLengthLimit` will be set to `enforcedLabelNameLengthLimit`. + // * Scrape objects with a labelNameLengthLimit value less than or equal to enforcedLabelNameLengthLimit keep their specific value. + // * Scrape objects with a labelNameLengthLimit value greater than enforcedLabelNameLengthLimit are set to enforcedLabelNameLengthLimit. + // + // + // +optional + EnforcedLabelNameLengthLimit *uint64 `json:"enforcedLabelNameLengthLimit,omitempty"` + // enforcedLabelValueLengthLimit when not null defines a global limit on the length + // of labels value per sample. The value overrides any `spec.labelValueLengthLimit` set by + // ServiceMonitor, PodMonitor, Probe objects unless `spec.labelValueLengthLimit` is + // greater than zero and less than `spec.enforcedLabelValueLengthLimit`. + // + // It requires Prometheus >= v2.27.0. + // + // When both `enforcedLabelValueLengthLimit` and `labelValueLengthLimit` are defined and greater than zero, the following rules apply: + // * Scrape objects without a defined labelValueLengthLimit value will inherit the global labelValueLengthLimit value (Prometheus >= 2.45.0) or the enforcedLabelValueLengthLimit value (Prometheus < v2.45.0). + // If Prometheus version is >= 2.45.0 and the `enforcedLabelValueLengthLimit` is greater than the `labelValueLengthLimit`, the `labelValueLengthLimit` will be set to `enforcedLabelValueLengthLimit`. + // * Scrape objects with a labelValueLengthLimit value less than or equal to enforcedLabelValueLengthLimit keep their specific value. + // * Scrape objects with a labelValueLengthLimit value greater than enforcedLabelValueLengthLimit are set to enforcedLabelValueLengthLimit. + // + // + // +optional + EnforcedLabelValueLengthLimit *uint64 `json:"enforcedLabelValueLengthLimit,omitempty"` + // enforcedKeepDroppedTargets when defined specifies a global limit on the number of targets + // dropped by relabeling that will be kept in memory. The value overrides + // any `spec.keepDroppedTargets` set by + // ServiceMonitor, PodMonitor, Probe objects unless `spec.keepDroppedTargets` is + // greater than zero and less than `spec.enforcedKeepDroppedTargets`. + // + // It requires Prometheus >= v2.47.0. 
+ // + // When both `enforcedKeepDroppedTargets` and `keepDroppedTargets` are defined and greater than zero, the following rules apply: + // * Scrape objects without a defined keepDroppedTargets value will inherit the global keepDroppedTargets value (Prometheus >= 2.45.0) or the enforcedKeepDroppedTargets value (Prometheus < v2.45.0). + // If Prometheus version is >= 2.45.0 and the `enforcedKeepDroppedTargets` is greater than the `keepDroppedTargets`, the `keepDroppedTargets` will be set to `enforcedKeepDroppedTargets`. + // * Scrape objects with a keepDroppedTargets value less than or equal to enforcedKeepDroppedTargets keep their specific value. + // * Scrape objects with a keepDroppedTargets value greater than enforcedKeepDroppedTargets are set to enforcedKeepDroppedTargets. + // + // + // +optional + EnforcedKeepDroppedTargets *uint64 `json:"enforcedKeepDroppedTargets,omitempty"` + // enforcedBodySizeLimit when defined specifies a global limit on the size + // of uncompressed response body that will be accepted by Prometheus. + // Targets responding with a body larger than this many bytes will cause + // the scrape to fail. + // + // It requires Prometheus >= v2.28.0. + // + // When both `enforcedBodySizeLimit` and `bodySizeLimit` are defined and greater than zero, the following rules apply: + // * Scrape objects without a defined bodySizeLimit value will inherit the global bodySizeLimit value (Prometheus >= 2.45.0) or the enforcedBodySizeLimit value (Prometheus < v2.45.0). + // If Prometheus version is >= 2.45.0 and the `enforcedBodySizeLimit` is greater than the `bodySizeLimit`, the `bodySizeLimit` will be set to `enforcedBodySizeLimit`. + // * Scrape objects with a bodySizeLimit value less than or equal to enforcedBodySizeLimit keep their specific value. + // * Scrape objects with a bodySizeLimit value greater than enforcedBodySizeLimit are set to enforcedBodySizeLimit. + // + // +optional + EnforcedBodySizeLimit ByteSize `json:"enforcedBodySizeLimit,omitempty"` + + // nameValidationScheme defines the validation scheme for metric and label names. + // + // It requires Prometheus >= v2.55.0. + // + // +optional + NameValidationScheme *NameValidationSchemeOptions `json:"nameValidationScheme,omitempty"` + + // nameEscapingScheme defines the character escaping scheme that will be requested when scraping + // for metric and label names that do not conform to the legacy Prometheus + // character set. + // + // It requires Prometheus >= v3.4.0. + // + // +optional + NameEscapingScheme *NameEscapingSchemeOptions `json:"nameEscapingScheme,omitempty"` + + // convertClassicHistogramsToNHCB defines whether to convert all scraped classic histograms into a native + // histogram with custom buckets. + // + // It requires Prometheus >= v3.4.0. + // + // +optional + ConvertClassicHistogramsToNHCB *bool `json:"convertClassicHistogramsToNHCB,omitempty"` // nolint:kubeapilinter + + // scrapeNativeHistograms defines whether to enable scraping of native histograms. + // It requires Prometheus >= v3.8.0. + // + // +optional + ScrapeNativeHistograms *bool `json:"scrapeNativeHistograms,omitempty"` // nolint:kubeapilinter + + // scrapeClassicHistograms defines whether to scrape a classic histogram that is also exposed as a native histogram. + // + // Notice: `scrapeClassicHistograms` corresponds to the `always_scrape_classic_histograms` field in the Prometheus configuration. + // + // It requires Prometheus >= v3.5.0. 
+ // + // +optional + ScrapeClassicHistograms *bool `json:"scrapeClassicHistograms,omitempty"` // nolint:kubeapilinter + + // minReadySeconds defines the minimum number of seconds for which a newly created Pod should be ready + // without any of its container crashing for it to be considered available. + // + // If unset, pods will be considered available as soon as they are ready. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + + // hostAliases defines the optional list of hosts and IPs that will be injected into the Pod's + // hosts file if specified. + // + // +listType=map + // +listMapKey=ip + // +optional + HostAliases []HostAlias `json:"hostAliases,omitempty"` + + // additionalArgs allows setting additional arguments for the 'prometheus' container. + // + // It is intended for e.g. activating hidden flags which are not supported by + // the dedicated configuration options yet. The arguments are passed as-is to the + // Prometheus container which may cause issues if they are invalid or not supported + // by the given Prometheus version. + // + // In case of an argument conflict (e.g. an argument which is already set by the + // operator itself) or when providing an invalid argument, the reconciliation will + // fail and an error will be logged. + // + // +optional + AdditionalArgs []Argument `json:"additionalArgs,omitempty"` + + // walCompression defines the compression of the write-ahead log (WAL) using Snappy. + // + // WAL compression is enabled by default for Prometheus >= 2.20.0 + // + // Requires Prometheus v2.11.0 and above. + // + // +optional + WALCompression *bool `json:"walCompression,omitempty"` // nolint:kubeapilinter + + // excludedFromEnforcement defines the list of references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects + // to be excluded from enforcing a namespace label of origin. + // + // It is only applicable if `spec.enforcedNamespaceLabel` set to true. + // + // +optional + ExcludedFromEnforcement []ObjectReference `json:"excludedFromEnforcement,omitempty"` + + // hostNetwork defines the host's network namespace if true. + // + // Make sure to understand the security implications if you want to enable + // it (https://kubernetes.io/docs/concepts/configuration/overview/ ). + // + // When hostNetwork is enabled, this will set the DNS policy to + // `ClusterFirstWithHostNet` automatically (unless `.spec.DNSPolicy` is set + // to a different value). + // + // +optional + HostNetwork bool `json:"hostNetwork,omitempty"` // nolint:kubeapilinter + + // podTargetLabels are appended to the `spec.podTargetLabels` field of all + // PodMonitor and ServiceMonitor objects. + // + // +optional + PodTargetLabels []string `json:"podTargetLabels,omitempty"` + + // tracingConfig defines tracing in Prometheus. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // +optional + TracingConfig *TracingConfig `json:"tracingConfig,omitempty"` + // bodySizeLimit defines per-scrape on response body size. + // Only valid in Prometheus versions 2.45.0 and newer. + // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedBodySizeLimit. + // + // +optional + BodySizeLimit *ByteSize `json:"bodySizeLimit,omitempty"` + // sampleLimit defines per-scrape limit on number of scraped samples that will be accepted. 
+ // Only valid in Prometheus versions 2.45.0 and newer. + // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedSampleLimit. + // + // +optional + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + // targetLimit defines a limit on the number of scraped targets that will be accepted. + // Only valid in Prometheus versions 2.45.0 and newer. + // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedTargetLimit. + // + // +optional + TargetLimit *uint64 `json:"targetLimit,omitempty"` + // labelLimit defines per-scrape limit on number of labels that will be accepted for a sample. + // Only valid in Prometheus versions 2.45.0 and newer. + // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedLabelLimit. + // + // +optional + LabelLimit *uint64 `json:"labelLimit,omitempty"` + // labelNameLengthLimit defines the per-scrape limit on length of labels name that will be accepted for a sample. + // Only valid in Prometheus versions 2.45.0 and newer. + // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedLabelNameLengthLimit. + // + // +optional + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + // labelValueLengthLimit defines the per-scrape limit on length of labels value that will be accepted for a sample. + // Only valid in Prometheus versions 2.45.0 and newer. + // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedLabelValueLengthLimit. + // + // +optional + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + // keepDroppedTargets defines the per-scrape limit on the number of targets dropped by relabeling + // that will be kept in memory. 0 means no limit. + // + // It requires Prometheus >= v2.47.0. + // + // Note that the global limit only applies to scrape objects that don't specify an explicit limit value. + // If you want to enforce a maximum limit for all scrape objects, refer to enforcedKeepDroppedTargets. + // + // +optional + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // reloadStrategy defines the strategy used to reload the Prometheus configuration. + // If not specified, the configuration is reloaded using the /-/reload HTTP endpoint. + // +optional + ReloadStrategy *ReloadStrategyType `json:"reloadStrategy,omitempty"` + + // maximumStartupDurationSeconds defines the maximum time that the `prometheus` container's startup probe will wait before being considered failed. The startup probe will return success after the WAL replay is complete. + // If set, the value should be greater than 60 (seconds). Otherwise it will be equal to 900 seconds (15 minutes). 
+ // +optional + // +kubebuilder:validation:Minimum=60 + MaximumStartupDurationSeconds *int32 `json:"maximumStartupDurationSeconds,omitempty"` + + // scrapeClasses defines the list of scrape classes to expose to scraping objects such as + // PodMonitors, ServiceMonitors, Probes and ScrapeConfigs. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // +listType=map + // +listMapKey=name + // +optional + ScrapeClasses []ScrapeClass `json:"scrapeClasses,omitempty"` + + // serviceDiscoveryRole defines the service discovery role used to discover targets from + // `ServiceMonitor` objects and Alertmanager endpoints. + // + // If set, the value should be either "Endpoints" or "EndpointSlice". + // If unset, the operator assumes the "Endpoints" role. + // + // +optional + ServiceDiscoveryRole *ServiceDiscoveryRole `json:"serviceDiscoveryRole,omitempty"` + + // tsdb defines the runtime reloadable configuration of the timeseries database(TSDB). + // It requires Prometheus >= v2.39.0 or PrometheusAgent >= v2.54.0. + // + // +optional + TSDB *TSDBSpec `json:"tsdb,omitempty"` + + // scrapeFailureLogFile defines the file to which scrape failures are logged. + // Reloading the configuration will reopen the file. + // + // If the filename has an empty path, e.g. 'file.log', The Prometheus Pods + // will mount the file into an emptyDir volume at `/var/log/prometheus`. + // If a full path is provided, e.g. '/var/log/prometheus/file.log', you + // must mount a volume in the specified directory and it must be writable. + // It requires Prometheus >= v2.55.0. + // + // +kubebuilder:validation:MinLength=1 + // +optional + ScrapeFailureLogFile *string `json:"scrapeFailureLogFile,omitempty"` + + // serviceName defines the name of the service name used by the underlying StatefulSet(s) as the governing service. + // If defined, the Service must be created before the Prometheus/PrometheusAgent resource in the same namespace and it must define a selector that matches the pod labels. + // If empty, the operator will create and manage a headless service named `prometheus-operated` for Prometheus resources, + // or `prometheus-agent-operated` for PrometheusAgent resources. + // When deploying multiple Prometheus/PrometheusAgent resources in the same namespace, it is recommended to specify a different value for each. + // See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id for more details. + // +optional + // +kubebuilder:validation:MinLength=1 + ServiceName *string `json:"serviceName,omitempty"` + + // runtime defines the values for the Prometheus process behavior + // +optional + Runtime *RuntimeConfig `json:"runtime,omitempty"` + + // terminationGracePeriodSeconds defines the optional duration in seconds the pod needs to terminate gracefully. + // Value must be non-negative integer. The value zero indicates stop immediately via + // the kill signal (no opportunity to shut down) which may lead to data corruption. + // + // Defaults to 600 seconds. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + + // hostUsers supports the user space in Kubernetes. + // + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/user-namespaces/ + // + // + // The feature requires at least Kubernetes 1.28 with the `UserNamespacesSupport` feature gate enabled. 
+ // Starting Kubernetes 1.33, the feature is enabled by default. + // + // +optional + HostUsers *bool `json:"hostUsers,omitempty"` // nolint:kubeapilinter +} + +// Specifies the validation scheme for metric and label names. +// +// Supported values are: +// - `UTF8NameValidationScheme` for UTF-8 support. +// - `LegacyNameValidationScheme` for letters, numbers, colons, and underscores. +// +// Note that `LegacyNameValidationScheme` cannot be used along with the +// OpenTelemetry `NoUTF8EscapingWithSuffixes` translation strategy (if +// enabled). +// +// +kubebuilder:validation:Enum=UTF8;Legacy +type NameValidationSchemeOptions string + +const ( + UTF8NameValidationScheme NameValidationSchemeOptions = "UTF8" + LegacyNameValidationScheme NameValidationSchemeOptions = "Legacy" +) + +// Specifies the character escaping scheme that will be applied when scraping +// for metric and label names that do not conform to the legacy Prometheus +// character set. +// +// Supported values are: +// +// - `AllowUTF8`, full UTF-8 support, no escaping needed. +// - `Underscores`, legacy-invalid characters are escaped to underscores. +// - `Dots`, dot characters are escaped to `_dot_`, underscores to `__`, and +// all other legacy-invalid characters to underscores. +// - `Values`, the string is prefixed by `U__` and all invalid characters are +// escaped to their unicode value, surrounded by underscores. +// +// +kubebuilder:validation:Enum=AllowUTF8;Underscores;Dots;Values +type NameEscapingSchemeOptions string + +const ( + AllowUTF8NameEscapingScheme NameEscapingSchemeOptions = "AllowUTF8" + UnderscoresNameEscapingScheme NameEscapingSchemeOptions = "Underscores" + DotsNameEscapingScheme NameEscapingSchemeOptions = "Dots" + ValuesNameEscapingScheme NameEscapingSchemeOptions = "Values" +) + +// +kubebuilder:validation:Enum=HTTP;ProcessSignal +type ReloadStrategyType string + +const ( + // HTTPReloadStrategyType reloads the configuration using the /-/reload HTTP endpoint. + HTTPReloadStrategyType ReloadStrategyType = "HTTP" + + // ProcessSignalReloadStrategyType reloads the configuration by sending a SIGHUP signal to the process. 
+	ProcessSignalReloadStrategyType ReloadStrategyType = "ProcessSignal"
+)
+
+// +kubebuilder:validation:Enum=Endpoints;EndpointSlice
+type ServiceDiscoveryRole string
+
+const (
+	EndpointsRole     ServiceDiscoveryRole = "Endpoints"
+	EndpointSliceRole ServiceDiscoveryRole = "EndpointSlice"
+)
+
+func (cpf *CommonPrometheusFields) PrometheusURIScheme() string {
+	if cpf.Web != nil && cpf.Web.TLSConfig != nil {
+		return "https"
+	}
+
+	return "http"
+}
+
+func (cpf *CommonPrometheusFields) WebRoutePrefix() string {
+	if cpf.RoutePrefix != "" {
+		return cpf.RoutePrefix
+	}
+
+	return "/"
+}
+
+// +genclient
+// +k8s:openapi-gen=true
+// +kubebuilder:resource:categories="prometheus-operator",shortName="prom"
+// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of Prometheus"
+// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".spec.replicas",description="The number of desired replicas"
+// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.availableReplicas",description="The number of ready replicas"
+// +kubebuilder:printcolumn:name="Reconciled",type="string",JSONPath=".status.conditions[?(@.type == 'Reconciled')].status"
+// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.conditions[?(@.type == 'Available')].status"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".status.paused",description="Whether the resource reconciliation is paused or not",priority=1
+// +kubebuilder:subresource:status
+// +kubebuilder:subresource:scale:specpath=.spec.shards,statuspath=.status.shards,selectorpath=.status.selector
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+
+// The `Prometheus` custom resource definition (CRD) defines a desired [Prometheus](https://prometheus.io/docs/prometheus) setup to run in a Kubernetes cluster. It allows specifying options such as the number of replicas, persistent storage, and the Alertmanagers to which firing alerts should be sent, among many others.
+//
+// For each `Prometheus` resource, the Operator deploys one or several `StatefulSet` objects in the same namespace. The number of StatefulSets is equal to the number of shards, which is 1 by default.
+//
+// The resource defines via label and namespace selectors which `ServiceMonitor`, `PodMonitor`, `Probe` and `PrometheusRule` objects should be associated with the deployed Prometheus instances.
+//
+// The Operator continuously reconciles the scrape and rules configuration and a sidecar container running in the Prometheus pods triggers a reload of the configuration when needed.
+type Prometheus struct {
+	// TypeMeta defines the versioned schema of this representation of an object.
+	metav1.TypeMeta `json:",inline"`
+	// metadata defines the standard object metadata that all persisted resources must have.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// spec defines the specification of the desired behavior of the Prometheus cluster. More info:
+	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +required
+	Spec PrometheusSpec `json:"spec"`
+	// status defines the most recent observed status of the Prometheus cluster. Read-only.
+ // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status PrometheusStatus `json:"status,omitempty"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *Prometheus) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// PrometheusList is a list of Prometheuses. +// +k8s:openapi-gen=true +type PrometheusList struct { + // TypeMeta defines the versioned schema of this representation of an object. + // +optional + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // List of Prometheuses + Items []Prometheus `json:"items"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *PrometheusList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// PrometheusSpec is a specification of the desired behavior of the Prometheus cluster. More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type PrometheusSpec struct { + // +optional + CommonPrometheusFields `json:",inline"` + + // baseImage is deprecated: use 'spec.image' instead. + // +optional + BaseImage string `json:"baseImage,omitempty"` + // tag is deprecated: use 'spec.image' instead. The image's tag can be specified as part of the image name. + // +optional + Tag string `json:"tag,omitempty"` + // sha is deprecated: use 'spec.image' instead. The image's digest can be specified as part of the image name. + // +optional + SHA string `json:"sha,omitempty"` + + // retention defines how long to retain the Prometheus data. + // + // Default: "24h" if `spec.retention` and `spec.retentionSize` are empty. + // +optional + Retention Duration `json:"retention,omitempty"` + // retentionSize defines the maximum number of bytes used by the Prometheus data. + // +optional + RetentionSize ByteSize `json:"retentionSize,omitempty"` + + // shardRetentionPolicy defines the retention policy for the Prometheus shards. + // (Alpha) Using this field requires the 'PrometheusShardRetentionPolicy' feature gate to be enabled. + // + // The final goals for this feature can be seen at https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/proposals/202310-shard-autoscaling.md#graceful-scale-down-of-prometheus-servers, + // however, the feature is not yet fully implemented in this PR. The limitation being: + // * Retention duration is not settable, for now, shards are retained forever. + // + // +optional + ShardRetentionPolicy *ShardRetentionPolicy `json:"shardRetentionPolicy,omitempty"` + + // disableCompaction when true, the Prometheus compaction is disabled. + // When `spec.thanos.objectStorageConfig` or `spec.objectStorageConfigFile` are defined, the operator automatically + // disables block compaction to avoid race conditions during block uploads (as the Thanos documentation recommends). + // +optional + DisableCompaction bool `json:"disableCompaction,omitempty"` // nolint:kubeapilinter + + // rules defines the configuration of the Prometheus rules' engine. + // +optional + Rules Rules `json:"rules,omitempty"` + // prometheusRulesExcludedFromEnforce defines the list of PrometheusRule objects to which the namespace label + // enforcement doesn't apply. + // This is only relevant when `spec.enforcedNamespaceLabel` is set to true. 
+ // +optional + // Deprecated: use `spec.excludedFromEnforcement` instead. + PrometheusRulesExcludedFromEnforce []PrometheusRuleExcludeConfig `json:"prometheusRulesExcludedFromEnforce,omitempty"` + // ruleSelector defines the prometheusRule objects to be selected for rule evaluation. An empty + // label selector matches all objects. A null label selector matches no + // objects. + // +optional + RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"` + // ruleNamespaceSelector defines the namespaces to match for PrometheusRule discovery. An empty label selector + // matches all namespaces. A null label selector matches the current + // namespace only. + // +optional + RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"` + + // query defines the configuration of the Prometheus query service. + // +optional + Query *QuerySpec `json:"query,omitempty"` + + // alerting defines the settings related to Alertmanager. + // +optional + Alerting *AlertingSpec `json:"alerting,omitempty"` + // additionalAlertRelabelConfigs defines a key of a Secret containing + // additional Prometheus alert relabel configurations. The alert relabel + // configurations are appended to the configuration generated by the + // Prometheus Operator. They must be formatted according to the official + // Prometheus documentation: + // + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs + // + // The user is responsible for making sure that the configurations are valid + // + // Note that using this feature may expose the possibility to break + // upgrades of Prometheus. It is advised to review Prometheus release notes + // to ensure that no incompatible alert relabel configs are going to break + // Prometheus after the upgrade. + // +optional + AdditionalAlertRelabelConfigs *v1.SecretKeySelector `json:"additionalAlertRelabelConfigs,omitempty"` + // additionalAlertManagerConfigs defines a key of a Secret containing + // additional Prometheus Alertmanager configurations. The Alertmanager + // configurations are appended to the configuration generated by the + // Prometheus Operator. They must be formatted according to the official + // Prometheus documentation: + // + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config + // + // The user is responsible for making sure that the configurations are valid + // + // Note that using this feature may expose the possibility to break + // upgrades of Prometheus. It is advised to review Prometheus release notes + // to ensure that no incompatible AlertManager configs are going to break + // Prometheus after the upgrade. + // +optional + AdditionalAlertManagerConfigs *v1.SecretKeySelector `json:"additionalAlertManagerConfigs,omitempty"` + + // remoteRead defines the list of remote read configurations. + // +optional + RemoteRead []RemoteReadSpec `json:"remoteRead,omitempty"` + + // thanos defines the configuration of the optional Thanos sidecar. + // + // +optional + Thanos *ThanosSpec `json:"thanos,omitempty"` + + // queryLogFile specifies where the file to which PromQL queries are logged. + // + // If the filename has an empty path, e.g. 'query.log', The Prometheus Pods + // will mount the file into an emptyDir volume at `/var/log/prometheus`. + // If a full path is provided, e.g. '/var/log/prometheus/query.log', you + // must mount a volume in the specified directory and it must be writable. 
+ // This is because the prometheus container runs with a read-only root + // filesystem for security reasons. + // Alternatively, the location can be set to a standard I/O stream, e.g. + // `/dev/stdout`, to log query information to the default Prometheus log + // stream. + // +optional + QueryLogFile string `json:"queryLogFile,omitempty"` + + // allowOverlappingBlocks enables vertical compaction and vertical query + // merge in Prometheus. + // + // Deprecated: this flag has no effect for Prometheus >= 2.39.0 where overlapping blocks are enabled by default. + // +optional + AllowOverlappingBlocks bool `json:"allowOverlappingBlocks,omitempty"` // nolint:kubeapilinter + + // exemplars related settings that are runtime reloadable. + // It requires to enable the `exemplar-storage` feature flag to be effective. + // +optional + Exemplars *Exemplars `json:"exemplars,omitempty"` + + // evaluationInterval defines the interval between rule evaluations. + // Default: "30s" + // +kubebuilder:default:="30s" + // +optional + EvaluationInterval Duration `json:"evaluationInterval,omitempty"` + + // ruleQueryOffset defines the offset the rule evaluation timestamp of this particular group by the specified duration into the past. + // It requires Prometheus >= v2.53.0. + // +optional + RuleQueryOffset *Duration `json:"ruleQueryOffset,omitempty"` + + // enableAdminAPI defines access to the Prometheus web admin API. + // + // WARNING: Enabling the admin APIs enables mutating endpoints, to delete data, + // shutdown Prometheus, and more. Enabling this should be done with care and the + // user is advised to add additional authentication authorization via a proxy to + // ensure only clients authorized to perform these actions can do so. + // + // For more information: + // https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis + // +optional + EnableAdminAPI bool `json:"enableAdminAPI,omitempty"` // nolint:kubeapilinter +} + +type WhenScaledRetentionType string + +var ( + RetainWhenScaledRetentionType WhenScaledRetentionType = "Retain" + DeleteWhenScaledRetentionType WhenScaledRetentionType = "Delete" +) + +type RetainConfig struct { + // retentionPeriod defines the retentionPeriod for shard retention policy. + // +required + RetentionPeriod Duration `json:"retentionPeriod"` +} + +type ShardRetentionPolicy struct { + // whenScaled defines the retention policy when the Prometheus shards are scaled down. + // * `Delete`, the operator will delete the pods from the scaled-down shard(s). + // * `Retain`, the operator will keep the pods from the scaled-down shard(s), so the data can still be queried. + // + // If not defined, the operator assumes the `Delete` value. + // +kubebuilder:validation:Enum=Retain;Delete + // +optional + WhenScaled *WhenScaledRetentionType `json:"whenScaled,omitempty"` + // retain defines the config for retention when the retention policy is set to `Retain`. + // This field is ineffective as of now. + // +optional + Retain *RetainConfig `json:"retain,omitempty"` +} + +// PrometheusStatus is the most recent observed status of the Prometheus cluster. +// More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type PrometheusStatus struct { + // paused defines whether any actions on the underlying managed objects are + // being performed. Only delete actions will be performed. 
+	// +optional
+	Paused bool `json:"paused"` // nolint:kubeapilinter
+	// replicas defines the total number of non-terminated pods targeted by this Prometheus deployment
+	// (their labels match the selector).
+	// +optional
+	Replicas int32 `json:"replicas"`
+	// updatedReplicas defines the total number of non-terminated pods targeted by this Prometheus deployment
+	// that have the desired version spec.
+	// +optional
+	UpdatedReplicas int32 `json:"updatedReplicas"`
+	// availableReplicas defines the total number of available pods (ready for at least minReadySeconds)
+	// targeted by this Prometheus deployment.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas"`
+	// unavailableReplicas defines the total number of unavailable pods targeted by this Prometheus deployment.
+	// +optional
+	UnavailableReplicas int32 `json:"unavailableReplicas"`
+	// conditions defines the current state of the Prometheus deployment.
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []Condition `json:"conditions,omitempty"`
+	// shardStatuses defines the list of shard statuses; the list has one entry per shard. Each entry provides a summary of the shard status.
+	// +listType=map
+	// +listMapKey=shardID
+	// +optional
+	ShardStatuses []ShardStatus `json:"shardStatuses,omitempty"`
+	// shards defines the most recently observed number of shards.
+	// +optional
+	Shards int32 `json:"shards,omitempty"`
+	// selector used to match the pods targeted by this Prometheus resource.
+	// +optional
+	Selector string `json:"selector,omitempty"`
+}
+
+// AlertingSpec defines parameters for alerting configuration of Prometheus servers.
+// +k8s:openapi-gen=true
+type AlertingSpec struct {
+	// alertmanagers defines the Alertmanager endpoints where Prometheus should send alerts to.
+	// +required
+	Alertmanagers []AlertmanagerEndpoints `json:"alertmanagers"`
+}
+
+// StorageSpec defines the configured storage for a group of Prometheus servers.
+// If no storage option is specified, then by default an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) will be used.
+//
+// If multiple storage options are specified, priority will be given as follows:
+// 1. emptyDir
+// 2. ephemeral
+// 3. volumeClaimTemplate
+//
+// +k8s:openapi-gen=true
+type StorageSpec struct {
+	// disableMountSubPath deprecated: subPath usage will be removed in a future release.
+	// +optional
+	DisableMountSubPath bool `json:"disableMountSubPath,omitempty"` // nolint:kubeapilinter
+	// emptyDir to be used by the StatefulSet.
+	// If specified, it takes precedence over `ephemeral` and `volumeClaimTemplate`.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir
+	// +optional
+	EmptyDir *v1.EmptyDirVolumeSource `json:"emptyDir,omitempty"`
+	// ephemeral to be used by the StatefulSet.
+	// This is a beta field in k8s 1.21 and GA in k8s 1.23.
+	// For lower versions, starting with k8s 1.19, it requires enabling the GenericEphemeralVolume feature gate.
+	// More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes
+	// +optional
+	Ephemeral *v1.EphemeralVolumeSource `json:"ephemeral,omitempty"`
+	// volumeClaimTemplate defines the PVC spec to be used by the Prometheus StatefulSets.
+	// The easiest way to use a volume that cannot be automatically provisioned
+	// is to use a label selector alongside manually created PersistentVolumes.
+	// +optional
+	VolumeClaimTemplate EmbeddedPersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
+}
+
+// QuerySpec defines the query command line flags when starting Prometheus.
+// +k8s:openapi-gen=true
+type QuerySpec struct {
+	// lookbackDelta defines the delta difference allowed for retrieving metrics during expression evaluations.
+	// +optional
+	LookbackDelta *string `json:"lookbackDelta,omitempty"`
+	// maxConcurrency defines the number of concurrent queries that can be run at once.
+	// +kubebuilder:validation:Minimum:=1
+	// +optional
+	MaxConcurrency *int32 `json:"maxConcurrency,omitempty"`
+	// maxSamples defines the maximum number of samples a single query can load into memory. Note that
+	// queries will fail if they would load more samples than this into memory,
+	// so this also limits the number of samples a query can return.
+	// +optional
+	MaxSamples *int32 `json:"maxSamples,omitempty"`
+	// timeout defines the maximum time a query may take before being aborted.
+	// +optional
+	Timeout *Duration `json:"timeout,omitempty"`
+}
+
+// PrometheusWebSpec defines the configuration of the Prometheus web server.
+// +k8s:openapi-gen=true
+type PrometheusWebSpec struct {
+	// +optional
+	WebConfigFileFields `json:",inline"`
+	// pageTitle defines the Prometheus web page title.
+	// +optional
+	PageTitle *string `json:"pageTitle,omitempty"`
+
+	// maxConnections defines the maximum number of simultaneous connections.
+	// A zero value means that Prometheus doesn't accept any incoming connection.
+	// +kubebuilder:validation:Minimum:=0
+	// +optional
+	MaxConnections *int32 `json:"maxConnections,omitempty"`
+}
+
+// ThanosSpec defines the configuration of the Thanos sidecar.
+// +k8s:openapi-gen=true
+type ThanosSpec struct {
+	// image defines the container image name for Thanos. If specified, it takes precedence over
+	// the `spec.thanos.baseImage`, `spec.thanos.tag` and `spec.thanos.sha`
+	// fields.
+	//
+	// Specifying `spec.thanos.version` is still necessary to ensure the
+	// Prometheus Operator knows which version of Thanos is being configured.
+	//
+	// If neither `spec.thanos.image` nor `spec.thanos.baseImage` are defined,
+	// the operator will use the latest upstream version of Thanos available at
+	// the time when the operator was released.
+	//
+	// +optional
+	Image *string `json:"image,omitempty"`
+
+	// version of Thanos being deployed. The operator uses this information
+	// to generate the Prometheus StatefulSet + configuration files.
+	//
+	// If not specified, the operator assumes the latest upstream release of
+	// Thanos available at the time when the version of the operator was
+	// released.
+	//
+	// +optional
+	Version *string `json:"version,omitempty"`
+	// tag is deprecated: use 'image' instead. The image's tag can be specified as part of the image name.
+	// +optional
+	Tag *string `json:"tag,omitempty"`
+	// sha is deprecated: use 'image' instead. The image digest can be specified as part of the image name.
+	// +optional
+	SHA *string `json:"sha,omitempty"`
+	// baseImage is deprecated: use 'image' instead.
+	// +optional
+	BaseImage *string `json:"baseImage,omitempty"`
+
+	// resources defines the resource requests and limits of the Thanos sidecar.
+	// +optional
+	Resources v1.ResourceRequirements `json:"resources,omitempty"`
+
+	// objectStorageConfig defines the Thanos sidecar's configuration to upload TSDB blocks to object storage.
+ // + // More info: https://thanos.io/tip/thanos/storage.md/ + // + // objectStorageConfigFile takes precedence over this field. + // +optional + ObjectStorageConfig *v1.SecretKeySelector `json:"objectStorageConfig,omitempty"` + // objectStorageConfigFile defines the Thanos sidecar's configuration file to upload TSDB blocks to object storage. + // + // More info: https://thanos.io/tip/thanos/storage.md/ + // + // This field takes precedence over objectStorageConfig. + // +optional + ObjectStorageConfigFile *string `json:"objectStorageConfigFile,omitempty"` + + // listenLocal is deprecated: use `grpcListenLocal` and `httpListenLocal` instead. + // +optional + ListenLocal bool `json:"listenLocal,omitempty"` // nolint:kubeapilinter + + // grpcListenLocal defines when true, the Thanos sidecar listens on the loopback interface instead + // of the Pod IP's address for the gRPC endpoints. + // + // It has no effect if `listenLocal` is true. + // +optional + GRPCListenLocal bool `json:"grpcListenLocal,omitempty"` // nolint:kubeapilinter + + // httpListenLocal when true, the Thanos sidecar listens on the loopback interface instead + // of the Pod IP's address for the HTTP endpoints. + // + // It has no effect if `listenLocal` is true. + // +optional + HTTPListenLocal bool `json:"httpListenLocal,omitempty"` // nolint:kubeapilinter + + // tracingConfig defines the tracing configuration for the Thanos sidecar. + // + // `tracingConfigFile` takes precedence over this field. + // + // More info: https://thanos.io/tip/thanos/tracing.md/ + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // +optional + TracingConfig *v1.SecretKeySelector `json:"tracingConfig,omitempty"` + // tracingConfigFile defines the tracing configuration file for the Thanos sidecar. + // + // This field takes precedence over `tracingConfig`. + // + // More info: https://thanos.io/tip/thanos/tracing.md/ + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // +optional + TracingConfigFile string `json:"tracingConfigFile,omitempty"` + + // grpcServerTlsConfig defines the TLS parameters for the gRPC server providing the StoreAPI. + // + // Note: Currently only the `caFile`, `certFile`, and `keyFile` fields are supported. + // + // +optional + GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"` + + // logLevel for the Thanos sidecar. + // +kubebuilder:validation:Enum="";debug;info;warn;error + // +optional + LogLevel string `json:"logLevel,omitempty"` + // logFormat for the Thanos sidecar. + // +kubebuilder:validation:Enum="";logfmt;json + // +optional + LogFormat string `json:"logFormat,omitempty"` + + // minTime defines the start of time range limit served by the Thanos sidecar's StoreAPI. + // The field's value should be a constant time in RFC3339 format or a time + // duration relative to current time, such as -1d or 2h45m. Valid duration + // units are ms, s, m, h, d, w, y. + // +optional + MinTime string `json:"minTime,omitempty"` + + // blockSize controls the size of TSDB blocks produced by Prometheus. + // The default value is 2h to match the upstream Prometheus defaults. + // + // WARNING: Changing the block duration can impact the performance and + // efficiency of the entire Prometheus/Thanos stack due to how it interacts + // with memory and Thanos compactors. It is recommended to keep this value + // set to a multiple of 120 times your longest scrape or rule interval. 
For + // example, 30s * 120 = 1h. + // + // +kubebuilder:default:="2h" + // +optional + BlockDuration Duration `json:"blockSize,omitempty"` + + // readyTimeout defines the maximum time that the Thanos sidecar will wait for + // Prometheus to start. + // +optional + ReadyTimeout Duration `json:"readyTimeout,omitempty"` + // getConfigInterval defines how often to retrieve the Prometheus configuration. + // +optional + GetConfigInterval Duration `json:"getConfigInterval,omitempty"` + // getConfigTimeout defines the maximum time to wait when retrieving the Prometheus configuration. + // +optional + GetConfigTimeout Duration `json:"getConfigTimeout,omitempty"` + + // volumeMounts allows configuration of additional VolumeMounts for Thanos. + // VolumeMounts specified will be appended to other VolumeMounts in the + // 'thanos-sidecar' container. + // +optional + VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` + + // additionalArgs allows setting additional arguments for the Thanos container. + // The arguments are passed as-is to the Thanos container which may cause issues + // if they are invalid or not supported the given Thanos version. + // In case of an argument conflict (e.g. an argument which is already set by the + // operator itself) or when providing an invalid argument, the reconciliation will + // fail and an error will be logged. + // +optional + AdditionalArgs []Argument `json:"additionalArgs,omitempty"` +} + +// RemoteWriteSpec defines the configuration to write samples from Prometheus +// to a remote endpoint. +// +k8s:openapi-gen=true +type RemoteWriteSpec struct { + // url defines the URL of the endpoint to send samples to. + // +kubebuilder:validation:MinLength=1 + // +required + URL string `json:"url"` + + // name of the remote write queue, it must be unique if specified. The + // name is used in metrics and logging in order to differentiate queues. + // + // It requires Prometheus >= v2.15.0 or Thanos >= 0.24.0. + // + // +optional + Name *string `json:"name,omitempty"` + + // messageVersion defines the Remote Write message's version to use when writing to the endpoint. + // + // `Version1.0` corresponds to the `prometheus.WriteRequest` protobuf message introduced in Remote Write 1.0. + // `Version2.0` corresponds to the `io.prometheus.write.v2.Request` protobuf message introduced in Remote Write 2.0. + // + // When `Version2.0` is selected, Prometheus will automatically be + // configured to append the metadata of scraped metrics to the WAL. + // + // Before setting this field, consult with your remote storage provider + // what message version it supports. + // + // It requires Prometheus >= v2.54.0 or Thanos >= v0.37.0. + // + // +optional + MessageVersion *RemoteWriteMessageVersion `json:"messageVersion,omitempty"` + + // sendExemplars enables sending of exemplars over remote write. Note that + // exemplar-storage itself must be enabled using the `spec.enableFeatures` + // option for exemplars to be scraped in the first place. + // + // It requires Prometheus >= v2.27.0 or Thanos >= v0.24.0. + // + // +optional + SendExemplars *bool `json:"sendExemplars,omitempty"` // nolint:kubeapilinter + + // sendNativeHistograms enables sending of native histograms, also known as sparse histograms + // over remote write. + // + // It requires Prometheus >= v2.40.0 or Thanos >= v0.30.0. 
+ // + // +optional + SendNativeHistograms *bool `json:"sendNativeHistograms,omitempty"` // nolint:kubeapilinter + + // remoteTimeout defines the timeout for requests to the remote write endpoint. + // +optional + RemoteTimeout *Duration `json:"remoteTimeout,omitempty"` + + // headers defines the custom HTTP headers to be sent along with each remote write request. + // Be aware that headers that are set by Prometheus itself can't be overwritten. + // + // It requires Prometheus >= v2.25.0 or Thanos >= v0.24.0. + // + // +optional + //nolint:kubeapilinter + Headers map[string]string `json:"headers,omitempty"` + + // writeRelabelConfigs defines the list of remote write relabel configurations. + // +optional + WriteRelabelConfigs []RelabelConfig `json:"writeRelabelConfigs,omitempty"` + + // oauth2 configuration for the URL. + // + // It requires Prometheus >= v2.27.0 or Thanos >= v0.24.0. + // + // Cannot be set at the same time as `sigv4`, `authorization`, `basicAuth`, or `azureAd`. + // +optional + OAuth2 *OAuth2 `json:"oauth2,omitempty"` + + // basicAuth configuration for the URL. + // + // Cannot be set at the same time as `sigv4`, `authorization`, `oauth2`, or `azureAd`. + // + // +optional + BasicAuth *BasicAuth `json:"basicAuth,omitempty"` + + // bearerTokenFile defines the file from which to read bearer token for the URL. + // + // Deprecated: this will be removed in a future release. Prefer using `authorization`. + // +optional + BearerTokenFile string `json:"bearerTokenFile,omitempty"` + + // authorization section for the URL. + // + // It requires Prometheus >= v2.26.0 or Thanos >= v0.24.0. + // + // Cannot be set at the same time as `sigv4`, `basicAuth`, `oauth2`, or `azureAd`. + // + // +optional + Authorization *Authorization `json:"authorization,omitempty"` + + // sigv4 defines the AWS's Signature Verification 4 for the URL. + // + // It requires Prometheus >= v2.26.0 or Thanos >= v0.24.0. + // + // Cannot be set at the same time as `authorization`, `basicAuth`, `oauth2`, or `azureAd`. + // + // +optional + Sigv4 *Sigv4 `json:"sigv4,omitempty"` + + // azureAd for the URL. + // + // It requires Prometheus >= v2.45.0 or Thanos >= v0.31.0. + // + // Cannot be set at the same time as `authorization`, `basicAuth`, `oauth2`, or `sigv4`. + // + // +optional + AzureAD *AzureAD `json:"azureAd,omitempty"` + + // bearerToken is deprecated: this will be removed in a future release. + // *Warning: this field shouldn't be used because the token value appears + // in clear-text. Prefer using `authorization`.* + // + // +optional + BearerToken string `json:"bearerToken,omitempty"` + + // tlsConfig to use for the URL. + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` + + // Optional ProxyConfig. + // +optional + ProxyConfig `json:",inline"` + + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // + // It requires Prometheus >= v2.26.0 or Thanos >= v0.24.0. + // + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + + // queueConfig allows tuning of the remote write queue parameters. + // +optional + QueueConfig *QueueConfig `json:"queueConfig,omitempty"` + + // metadataConfig defines how to send a series metadata to the remote storage. + // + // When the field is empty, **no metadata** is sent. But when the field is + // null, metadata is sent. + // + // +optional + MetadataConfig *MetadataConfig `json:"metadataConfig,omitempty"` + + // enableHTTP2 defines whether to enable HTTP2. 
+ // +optional + EnableHttp2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter + + // roundRobinDNS controls the DNS resolution behavior for remote-write connections. + // When enabled: + // - The remote-write mechanism will resolve the hostname via DNS. + // - It will randomly select one of the resolved IP addresses and connect to it. + // + // When disabled (default behavior): + // - The Go standard library will handle hostname resolution. + // - It will attempt connections to each resolved IP address sequentially. + // + // Note: The connection timeout applies to the entire resolution and connection process. + // + // If disabled, the timeout is distributed across all connection attempts. + // + // It requires Prometheus >= v3.1.0 or Thanos >= v0.38.0. + // + // +optional + RoundRobinDNS *bool `json:"roundRobinDNS,omitempty"` // nolint:kubeapilinter +} + +// +kubebuilder:validation:Enum=V1.0;V2.0 +type RemoteWriteMessageVersion string + +const ( + // Remote Write message's version 1.0. + RemoteWriteMessageVersion1_0 = RemoteWriteMessageVersion("V1.0") + // Remote Write message's version 2.0. + RemoteWriteMessageVersion2_0 = RemoteWriteMessageVersion("V2.0") +) + +// QueueConfig allows the tuning of remote write's queue_config parameters. +// This object is referenced in the RemoteWriteSpec object. +// +k8s:openapi-gen=true +type QueueConfig struct { + // capacity defines the number of samples to buffer per shard before we start + // dropping them. + // +optional + Capacity int `json:"capacity,omitempty"` + // minShards defines the minimum number of shards, i.e. amount of concurrency. + // +optional + MinShards int `json:"minShards,omitempty"` + // maxShards defines the maximum number of shards, i.e. amount of concurrency. + // +optional + MaxShards int `json:"maxShards,omitempty"` + // maxSamplesPerSend defines the maximum number of samples per send. + // +optional + MaxSamplesPerSend int `json:"maxSamplesPerSend,omitempty"` + // batchSendDeadline defines the maximum time a sample will wait in buffer. + // +optional + BatchSendDeadline *Duration `json:"batchSendDeadline,omitempty"` + // maxRetries defines the maximum number of times to retry a batch on recoverable errors. + // +optional + MaxRetries int `json:"maxRetries,omitempty"` + // minBackoff defines the initial retry delay. Gets doubled for every retry. + // +optional + MinBackoff *Duration `json:"minBackoff,omitempty"` + // maxBackoff defines the maximum retry delay. + // +optional + MaxBackoff *Duration `json:"maxBackoff,omitempty"` + // retryOnRateLimit defines the retry upon receiving a 429 status code from the remote-write storage. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // +optional + RetryOnRateLimit bool `json:"retryOnRateLimit,omitempty"` // nolint:kubeapilinter + // sampleAgeLimit drops samples older than the limit. + // It requires Prometheus >= v2.50.0 or Thanos >= v0.32.0. + // + // +optional + SampleAgeLimit *Duration `json:"sampleAgeLimit,omitempty"` +} + +// Sigv4 defines AWS's Signature Verification 4 signing process to +// sign requests. +// +k8s:openapi-gen=true +type Sigv4 struct { + // region defines the AWS region. If blank, the region from the default credentials chain used. + // +optional + Region string `json:"region,omitempty"` + // accessKey defines the AWS API key. If not specified, the environment variable + // `AWS_ACCESS_KEY_ID` is used. 
+	// +optional
+	AccessKey *v1.SecretKeySelector `json:"accessKey,omitempty"`
+	// secretKey defines the AWS API secret. If not specified, the environment
+	// variable `AWS_SECRET_ACCESS_KEY` is used.
+	// +optional
+	SecretKey *v1.SecretKeySelector `json:"secretKey,omitempty"`
+	// profile defines the named AWS profile used to authenticate.
+	// +optional
+	Profile string `json:"profile,omitempty"`
+	// roleArn defines the AWS Role ARN to use for authentication, as an alternative to using AWS API keys.
+	// +optional
+	RoleArn string `json:"roleArn,omitempty"`
+	// useFIPSSTSEndpoint defines the FIPS mode for the AWS STS endpoint.
+	// It requires Prometheus >= v2.54.0.
+	//
+	// +optional
+	UseFIPSSTSEndpoint *bool `json:"useFIPSSTSEndpoint,omitempty"` // nolint:kubeapilinter
+}
+
+// AzureAD defines the configuration for remote write's azuread parameters.
+// +k8s:openapi-gen=true
+type AzureAD struct {
+	// cloud defines the Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'.
+	// +kubebuilder:validation:Enum=AzureChina;AzureGovernment;AzurePublic
+	// +optional
+	Cloud *string `json:"cloud,omitempty"`
+	// managedIdentity defines the Azure User-assigned Managed identity.
+	// Cannot be set at the same time as `oauth`, `sdk` or `workloadIdentity`.
+	// +optional
+	ManagedIdentity *ManagedIdentity `json:"managedIdentity,omitempty"`
+	// oauth defines the oauth config that is being used to authenticate.
+	// Cannot be set at the same time as `managedIdentity`, `sdk` or `workloadIdentity`.
+	//
+	// It requires Prometheus >= v2.48.0 or Thanos >= v0.31.0.
+	//
+	// +optional
+	OAuth *AzureOAuth `json:"oauth,omitempty"`
+	// sdk defines the Azure SDK config that is being used to authenticate.
+	// See https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication
+	// Cannot be set at the same time as `oauth`, `managedIdentity` or `workloadIdentity`.
+	//
+	// It requires Prometheus >= v2.52.0 or Thanos >= v0.36.0.
+	// +optional
+	SDK *AzureSDK `json:"sdk,omitempty"`
+	// workloadIdentity defines the Azure Workload Identity authentication.
+	// Cannot be set at the same time as `oauth`, `managedIdentity`, or `sdk`.
+	//
+	// It requires Prometheus >= 3.7.0. Currently not supported by Thanos.
+	// +optional
+	WorkloadIdentity *AzureWorkloadIdentity `json:"workloadIdentity,omitempty"`
+	// scope is the custom OAuth 2.0 scope to request when acquiring tokens.
+	// It requires Prometheus >= 3.9.0. Currently not supported by Thanos.
+	// +kubebuilder:validation:Pattern=`^[\w\s:/.\\-]+$`
+	// +optional
+	Scope *string `json:"scope,omitempty"`
+}
+
+// AzureOAuth defines the Azure OAuth settings.
+// +k8s:openapi-gen=true
+type AzureOAuth struct {
+	// clientId defines the clientId of the Azure Active Directory application that is being used to authenticate.
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	ClientID string `json:"clientId"`
+	// clientSecret specifies a key of a Secret containing the client secret of the Azure Active Directory application that is being used to authenticate.
+	// +required
+	ClientSecret v1.SecretKeySelector `json:"clientSecret"`
+	// tenantId is the tenant ID of the Azure Active Directory application that is being used to authenticate.
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:Pattern:=^[0-9a-zA-Z-.]+$
+	TenantID string `json:"tenantId"`
+}
+
+// ManagedIdentity defines the Azure User-assigned Managed identity.
+// +k8s:openapi-gen=true +type ManagedIdentity struct { + // clientId defines the Azure User-assigned Managed identity. + // + // For Prometheus >= 3.5.0 and Thanos >= 0.40.0, this field is allowed to be empty to support system-assigned managed identities. + // + // +optional + // +kubebuilder:validation:MinLength:=1 + ClientID *string `json:"clientId"` +} + +// AzureSDK is used to store azure SDK config values. +type AzureSDK struct { + // tenantId defines the tenant ID of the azure active directory application that is being used to authenticate. + // +optional + // +kubebuilder:validation:Pattern:=^[0-9a-zA-Z-.]+$ + TenantID *string `json:"tenantId,omitempty"` +} + +// AzureWorkloadIdentity defines the Azure Workload Identity authentication configuration. +type AzureWorkloadIdentity struct { + // clientId is the clientID of the Azure Active Directory application. + // +kubebuilder:validation:MinLength=1 + // +required + ClientID string `json:"clientId"` + + // tenantId is the tenant ID of the Azure Active Directory application. + // +kubebuilder:validation:MinLength=1 + // +required + TenantID string `json:"tenantId"` +} + +// RemoteReadSpec defines the configuration for Prometheus to read back samples +// from a remote endpoint. +// +k8s:openapi-gen=true +type RemoteReadSpec struct { + // url defines the URL of the endpoint to query from. + // +required + URL string `json:"url"` + + // name of the remote read queue, it must be unique if specified. The + // name is used in metrics and logging in order to differentiate read + // configurations. + // + // It requires Prometheus >= v2.15.0. + // + // +optional + Name string `json:"name,omitempty"` + + // requiredMatchers defines an optional list of equality matchers which have to be present + // in a selector to query the remote read endpoint. + // +optional + //nolint:kubeapilinter + RequiredMatchers map[string]string `json:"requiredMatchers,omitempty"` + + // remoteTimeout defines the timeout for requests to the remote read endpoint. + // +optional + RemoteTimeout *Duration `json:"remoteTimeout,omitempty"` + + // headers defines the custom HTTP headers to be sent along with each remote read request. + // Be aware that headers that are set by Prometheus itself can't be overwritten. + // Only valid in Prometheus versions 2.26.0 and newer. + // +optional + //nolint:kubeapilinter + Headers map[string]string `json:"headers,omitempty"` + + // readRecent defines whether reads should be made for queries for time ranges that + // the local storage should have complete data for. + // +optional + ReadRecent bool `json:"readRecent,omitempty"` // nolint:kubeapilinter + + // oauth2 configuration for the URL. + // + // It requires Prometheus >= v2.27.0. + // + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // + // +optional + OAuth2 *OAuth2 `json:"oauth2,omitempty"` + // basicAuth configuration for the URL. + // + // Cannot be set at the same time as `authorization`, or `oauth2`. + // + // +optional + BasicAuth *BasicAuth `json:"basicAuth,omitempty"` + // bearerTokenFile defines the file from which to read the bearer token for the URL. + // + // Deprecated: this will be removed in a future release. Prefer using `authorization`. + // +optional + BearerTokenFile string `json:"bearerTokenFile,omitempty"` + // authorization section for the URL. + // + // It requires Prometheus >= v2.26.0. + // + // Cannot be set at the same time as `basicAuth`, or `oauth2`. 
+ // + // +optional + Authorization *Authorization `json:"authorization,omitempty"` + + // bearerToken is deprecated: this will be removed in a future release. + // *Warning: this field shouldn't be used because the token value appears + // in clear-text. Prefer using `authorization`.* + // + // +optional + BearerToken string `json:"bearerToken,omitempty"` + + // tlsConfig to use for the URL. + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` + + // Optional ProxyConfig. + // +optional + ProxyConfig `json:",inline"` + + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // + // It requires Prometheus >= v2.26.0. + // + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + + // filterExternalLabels defines whether to use the external labels as selectors for the remote read endpoint. + // + // It requires Prometheus >= v2.34.0. + // + // +optional + FilterExternalLabels *bool `json:"filterExternalLabels,omitempty"` // nolint:kubeapilinter +} + +// RelabelConfig allows dynamic rewriting of the label set for targets, alerts, +// scraped samples and remote write samples. +// +// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config +// +// +k8s:openapi-gen=true +type RelabelConfig struct { + // sourceLabels defines the source labels select values from existing labels. Their content is + // concatenated using the configured Separator and matched against the + // configured regular expression. + // + // +optional + SourceLabels []LabelName `json:"sourceLabels,omitempty"` + + // separator defines the string between concatenated SourceLabels. + // +optional + Separator *string `json:"separator,omitempty"` + + // targetLabel defines the label to which the resulting string is written in a replacement. + // + // It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + // `KeepEqual` and `DropEqual` actions. + // + // Regex capture groups are available. + // +optional + TargetLabel string `json:"targetLabel,omitempty"` + + // regex defines the regular expression against which the extracted value is matched. + // +optional + Regex string `json:"regex,omitempty"` + + // modulus to take of the hash of the source label values. + // + // Only applicable when the action is `HashMod`. + // +optional + Modulus uint64 `json:"modulus,omitempty"` + + // replacement value against which a Replace action is performed if the + // regular expression matches. + // + // Regex capture groups are available. + // + // +optional + Replacement *string `json:"replacement,omitempty"` + + // action to perform based on the regex matching. + // + // `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + // `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + // + // Default: "Replace" + // + // +kubebuilder:validation:Enum=replace;Replace;keep;Keep;drop;Drop;hashmod;HashMod;labelmap;LabelMap;labeldrop;LabelDrop;labelkeep;LabelKeep;lowercase;Lowercase;uppercase;Uppercase;keepequal;KeepEqual;dropequal;DropEqual + // +kubebuilder:default=replace + // +optional + Action string `json:"action,omitempty"` +} + +// APIServerConfig defines how the Prometheus server connects to the Kubernetes API server. 
+// +// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config +// +// +k8s:openapi-gen=true +type APIServerConfig struct { + // host defines the Kubernetes API address consisting of a hostname or IP address followed + // by an optional port number. + // +required + Host string `json:"host"` + + // basicAuth configuration for the API server. + // + // Cannot be set at the same time as `authorization`, `bearerToken`, or + // `bearerTokenFile`. + // + // +optional + BasicAuth *BasicAuth `json:"basicAuth,omitempty"` + + // bearerTokenFile defines the file to read bearer token for accessing apiserver. + // + // Cannot be set at the same time as `basicAuth`, `authorization`, or `bearerToken`. + // + // Deprecated: this will be removed in a future release. Prefer using `authorization`. + // +optional + BearerTokenFile string `json:"bearerTokenFile,omitempty"` + + // tlsConfig to use for the API server. + // + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` + + // authorization section for the API server. + // + // Cannot be set at the same time as `basicAuth`, `bearerToken`, or + // `bearerTokenFile`. + // + // +optional + Authorization *Authorization `json:"authorization,omitempty"` + + // bearerToken is deprecated: this will be removed in a future release. + // *Warning: this field shouldn't be used because the token value appears + // in clear-text. Prefer using `authorization`.* + // + // +optional + BearerToken string `json:"bearerToken,omitempty"` + + // Optional ProxyConfig. + // +optional + ProxyConfig `json:",inline"` +} + +// +kubebuilder:validation:Enum=v1;V1;v2;V2 +type AlertmanagerAPIVersion string + +const ( + AlertmanagerAPIVersion1 = AlertmanagerAPIVersion("V1") + AlertmanagerAPIVersion2 = AlertmanagerAPIVersion("V2") +) + +// AlertmanagerEndpoints defines a selection of a single Endpoints object +// containing Alertmanager IPs to fire alerts against. +// +k8s:openapi-gen=true +type AlertmanagerEndpoints struct { + // namespace of the Endpoints object. + // + // If not set, the object will be discovered in the namespace of the + // Prometheus object. + // + // +kubebuilder:validation:MinLength:=1 + // +optional + Namespace *string `json:"namespace,omitempty"` + + // name of the Endpoints object in the namespace. + // + // +kubebuilder:validation:MinLength:=1 + // +required + Name string `json:"name"` + + // port on which the Alertmanager API is exposed. + // +required + Port intstr.IntOrString `json:"port"` + + // scheme defines the HTTP scheme to use when sending alerts. + // + // +optional + Scheme *Scheme `json:"scheme,omitempty"` + + // pathPrefix defines the prefix for the HTTP path alerts are pushed to. + // + // +kubebuilder:validation:MinLength=1 + // +optional + PathPrefix *string `json:"pathPrefix,omitempty"` + + // tlsConfig to use for Alertmanager. + // + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` + + // basicAuth configuration for Alertmanager. + // + // Cannot be set at the same time as `bearerTokenFile`, `authorization` or `sigv4`. + // + // +optional + BasicAuth *BasicAuth `json:"basicAuth,omitempty"` + + // bearerTokenFile defines the file to read bearer token for Alertmanager. + // + // Cannot be set at the same time as `basicAuth`, `authorization`, or `sigv4`. + // + // Deprecated: this will be removed in a future release. Prefer using `authorization`. + // +optional + BearerTokenFile string `json:"bearerTokenFile,omitempty"` + + // authorization section for Alertmanager. 
+ // + // Cannot be set at the same time as `basicAuth`, `bearerTokenFile` or `sigv4`. + // + // +optional + Authorization *SafeAuthorization `json:"authorization,omitempty"` + + // sigv4 defines AWS's Signature Verification 4 for the URL. + // + // It requires Prometheus >= v2.48.0. + // + // Cannot be set at the same time as `basicAuth`, `bearerTokenFile` or `authorization`. + // + // +optional + Sigv4 *Sigv4 `json:"sigv4,omitempty"` + + // ProxyConfig + // +optional + ProxyConfig `json:",inline"` + + // apiVersion defines the version of the Alertmanager API that Prometheus uses to send alerts. + // It can be "V1" or "V2". + // The field has no effect for Prometheus >= v3.0.0 because only the v2 API is supported. + // + // +optional + APIVersion *AlertmanagerAPIVersion `json:"apiVersion,omitempty"` + + // timeout defines a per-target Alertmanager timeout when pushing alerts. + // + // +optional + Timeout *Duration `json:"timeout,omitempty"` + + // enableHttp2 defines whether to enable HTTP2. + // + // +optional + EnableHttp2 *bool `json:"enableHttp2,omitempty"` // nolint:kubeapilinter + + // relabelings defines the relabel configuration applied to the discovered Alertmanagers. + // + // +optional + RelabelConfigs []RelabelConfig `json:"relabelings,omitempty"` + + // alertRelabelings defines the relabeling configs applied before sending alerts to a specific Alertmanager. + // It requires Prometheus >= v2.51.0. + // + // +optional + AlertRelabelConfigs []RelabelConfig `json:"alertRelabelings,omitempty"` +} + +// +k8s:openapi-gen=true +type Rules struct { + // alert defines the parameters of the Prometheus rules' engine. + // + // Any update to these parameters trigger a restart of the pods. + // +optional + Alert RulesAlert `json:"alert,omitempty"` +} + +// +k8s:openapi-gen=true +type RulesAlert struct { + // forOutageTolerance defines the max time to tolerate prometheus outage for restoring 'for' state of + // alert. + // +optional + ForOutageTolerance string `json:"forOutageTolerance,omitempty"` + + // forGracePeriod defines the minimum duration between alert and restored 'for' state. + // + // This is maintained only for alerts with a configured 'for' time greater + // than the grace period. + // +optional + ForGracePeriod string `json:"forGracePeriod,omitempty"` + + // resendDelay defines the minimum amount of time to wait before resending an alert to + // Alertmanager. + // +optional + ResendDelay string `json:"resendDelay,omitempty"` +} + +// MetadataConfig configures the sending of series metadata to the remote storage. +// +// +k8s:openapi-gen=true +type MetadataConfig struct { + // send defines whether metric metadata is sent to the remote storage or not. + // + // +optional + Send bool `json:"send,omitempty"` // nolint:kubeapilinter + + // sendInterval defines how frequently metric metadata is sent to the remote storage. + // + // +optional + SendInterval Duration `json:"sendInterval,omitempty"` + + // maxSamplesPerSend defines the maximum number of metadata samples per send. + // + // It requires Prometheus >= v2.29.0. + // + // +optional + // +kubebuilder:validation:Minimum=-1 + MaxSamplesPerSend *int32 `json:"maxSamplesPerSend,omitempty"` +} + +type ShardStatus struct { + // shardID defines the identifier of the shard. + // +required + ShardID string `json:"shardID"` + // replicas defines the total number of pods targeted by this shard. 
+ // +required + Replicas int32 `json:"replicas"` + // updatedReplicas defines the total number of non-terminated pods targeted by this shard + // that have the desired spec. + // +required + UpdatedReplicas int32 `json:"updatedReplicas"` + // availableReplicas defines the total number of available pods (ready for at least minReadySeconds) + // targeted by this shard. + // +required + AvailableReplicas int32 `json:"availableReplicas"` + // unavailableReplicas defines the Total number of unavailable pods targeted by this shard. + // +required + UnavailableReplicas int32 `json:"unavailableReplicas"` +} + +type TSDBSpec struct { + // outOfOrderTimeWindow defines how old an out-of-order/out-of-bounds sample can be with + // respect to the TSDB max time. + // + // An out-of-order/out-of-bounds sample is ingested into the TSDB as long as + // the timestamp of the sample is >= (TSDB.MaxTime - outOfOrderTimeWindow). + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // It requires Prometheus >= v2.39.0 or PrometheusAgent >= v2.54.0. + // +optional + OutOfOrderTimeWindow *Duration `json:"outOfOrderTimeWindow,omitempty"` +} + +type Exemplars struct { + // maxSize defines the maximum number of exemplars stored in memory for all series. + // + // exemplar-storage itself must be enabled using the `spec.enableFeature` + // option for exemplars to be scraped in the first place. + // + // If not set, Prometheus uses its default value. A value of zero or less + // than zero disables the storage. + // + // +optional + MaxSize *int64 `json:"maxSize,omitempty"` +} + +// SafeAuthorization specifies a subset of the Authorization struct, that is +// safe for use because it doesn't provide access to the Prometheus container's +// filesystem. +// +// +k8s:openapi-gen=true +type SafeAuthorization struct { + // type defines the authentication type. The value is case-insensitive. + // + // "Basic" is not a supported value. + // + // Default: "Bearer" + // +optional + Type string `json:"type,omitempty"` + + // credentials defines a key of a Secret in the namespace that contains the credentials for authentication. + // +optional + Credentials *v1.SecretKeySelector `json:"credentials,omitempty"` +} + +// Validate semantically validates the given Authorization section. +func (c *SafeAuthorization) Validate() error { + if c == nil { + return nil + } + + if strings.ToLower(strings.TrimSpace(c.Type)) == "basic" { + return errors.New("authorization type cannot be set to \"basic\", use \"basicAuth\" instead") + } + + if c.Credentials == nil { + return errors.New("authorization credentials are required") + } + + return nil +} + +type Authorization struct { + // +optional + SafeAuthorization `json:",inline"` + + // credentialsFile defines the file to read a secret from, mutually exclusive with `credentials`. + // +optional + CredentialsFile string `json:"credentialsFile,omitempty"` +} + +// Validate semantically validates the given Authorization section. +func (c *Authorization) Validate() error { + if c == nil { + return nil + } + + if c.Credentials != nil && c.CredentialsFile != "" { + return errors.New("authorization can not specify both \"credentials\" and \"credentialsFile\"") + } + + if strings.ToLower(strings.TrimSpace(c.Type)) == "basic" { + return errors.New("authorization type cannot be set to \"basic\", use \"basicAuth\" instead") + } + + return nil +} + +type ScrapeClass struct { + // name of the scrape class. 
+ // + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + + // default defines that the scrape applies to all scrape objects that + // don't configure an explicit scrape class name. + // + // Only one scrape class can be set as the default. + // + // +optional + Default *bool `json:"default,omitempty"` // nolint:kubeapilinter + + // fallbackScrapeProtocol defines the protocol to use if a scrape returns blank, unparseable, or otherwise invalid Content-Type. + // It will only apply if the scrape resource doesn't specify any FallbackScrapeProtocol + // + // It requires Prometheus >= v3.0.0. + // +optional + FallbackScrapeProtocol *ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + + // tlsConfig defines the TLS settings to use for the scrape. When the + // scrape objects define their own CA, certificate and/or key, they take + // precedence over the corresponding scrape class fields. + // + // For now only the `caFile`, `certFile` and `keyFile` fields are supported. + // + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` + + // authorization section for the ScrapeClass. + // It will only apply if the scrape resource doesn't specify any Authorization. + // +optional + Authorization *Authorization `json:"authorization,omitempty"` + + // relabelings defines the relabeling rules to apply to all scrape targets. + // + // The Operator automatically adds relabelings for a few standard Kubernetes fields + // like `__meta_kubernetes_namespace` and `__meta_kubernetes_service_name`. + // Then the Operator adds the scrape class relabelings defined here. + // Then the Operator adds the target-specific relabelings defined in the scrape object. + // + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + // + // +optional + Relabelings []RelabelConfig `json:"relabelings,omitempty"` + + // metricRelabelings defines the relabeling rules to apply to all samples before ingestion. + // + // The Operator adds the scrape class metric relabelings defined here. + // Then the Operator adds the target-specific metric relabelings defined in ServiceMonitors, PodMonitors, Probes and ScrapeConfigs. + // Then the Operator adds namespace enforcement relabeling rule, specified in '.spec.enforcedNamespaceLabel'. + // + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs + // + // +optional + MetricRelabelings []RelabelConfig `json:"metricRelabelings,omitempty"` + + // attachMetadata defines additional metadata to the discovered targets. + // When the scrape object defines its own configuration, it takes + // precedence over the scrape class configuration. + // + // +optional + AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` +} + +// TranslationStrategyOption represents a translation strategy option for the OTLP endpoint. +// Supported values are: +// * `NoUTF8EscapingWithSuffixes` +// * `UnderscoreEscapingWithSuffixes` +// * `UnderscoreEscapingWithoutSuffixes` +// * `NoTranslation` +// +kubebuilder:validation:Enum=NoUTF8EscapingWithSuffixes;UnderscoreEscapingWithSuffixes;NoTranslation;UnderscoreEscapingWithoutSuffixes +type TranslationStrategyOption string + +const ( + NoUTF8EscapingWithSuffixes TranslationStrategyOption = "NoUTF8EscapingWithSuffixes" + UnderscoreEscapingWithSuffixes TranslationStrategyOption = "UnderscoreEscapingWithSuffixes" + // It requires Prometheus >= v3.4.0. 
+ NoTranslation TranslationStrategyOption = "NoTranslation" + // It requires Prometheus >= v3.6.0. + UnderscoreEscapingWithoutSuffixes TranslationStrategyOption = "UnderscoreEscapingWithoutSuffixes" +) + +// OTLPConfig is the configuration for writing to the OTLP endpoint. +// +// +k8s:openapi-gen=true +type OTLPConfig struct { + // promoteAllResourceAttributes promotes all resource attributes to metric labels except the ones defined in `ignoreResourceAttributes`. + // + // Cannot be true when `promoteResourceAttributes` is defined. + // It requires Prometheus >= v3.5.0. + // +optional + PromoteAllResourceAttributes *bool `json:"promoteAllResourceAttributes,omitempty"` // nolint:kubeapilinter + + // ignoreResourceAttributes defines the list of OpenTelemetry resource attributes to ignore when `promoteAllResourceAttributes` is true. + // + // It requires `promoteAllResourceAttributes` to be true. + // It requires Prometheus >= v3.5.0. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:items:MinLength=1 + // +listType=set + // +optional + IgnoreResourceAttributes []string `json:"ignoreResourceAttributes,omitempty"` + + // promoteResourceAttributes defines the list of OpenTelemetry Attributes that should be promoted to metric labels, defaults to none. + // Cannot be defined when `promoteAllResourceAttributes` is true. + // + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:items:MinLength=1 + // +listType=set + // +optional + PromoteResourceAttributes []string `json:"promoteResourceAttributes,omitempty"` + + // translationStrategy defines how the OTLP receiver endpoint translates the incoming metrics. + // + // It requires Prometheus >= v3.0.0. + // +optional + TranslationStrategy *TranslationStrategyOption `json:"translationStrategy,omitempty"` + + // keepIdentifyingResourceAttributes enables adding `service.name`, `service.namespace` and `service.instance.id` + // resource attributes to the `target_info` metric, on top of converting them into the `instance` and `job` labels. + // + // It requires Prometheus >= v3.1.0. + // +optional + KeepIdentifyingResourceAttributes *bool `json:"keepIdentifyingResourceAttributes,omitempty"` // nolint:kubeapilinter + + // convertHistogramsToNHCB defines optional translation of OTLP explicit bucket histograms into native histograms with custom buckets. + // It requires Prometheus >= v3.4.0. + // +optional + ConvertHistogramsToNHCB *bool `json:"convertHistogramsToNHCB,omitempty"` // nolint:kubeapilinter + + // promoteScopeMetadata controls whether to promote OpenTelemetry scope metadata (i.e. name, version, schema URL, and attributes) to metric labels. + // As per the OpenTelemetry specification, the aforementioned scope metadata should be identifying, i.e. made into metric labels. + // It requires Prometheus >= v3.6.0. + // +optional + PromoteScopeMetadata *bool `json:"promoteScopeMetadata,omitempty"` // nolint:kubeapilinter +} + +// Validate semantically validates the given OTLPConfig section. 
+func (c *OTLPConfig) Validate() error { + if c == nil { + return nil + } + + if len(c.PromoteResourceAttributes) > 0 && c.PromoteAllResourceAttributes != nil && *c.PromoteAllResourceAttributes { + return fmt.Errorf("'promoteAllResourceAttributes' cannot be set to 'true' simultaneously with 'promoteResourceAttributes'") + } + + if len(c.IgnoreResourceAttributes) > 0 && (c.PromoteAllResourceAttributes == nil || !*c.PromoteAllResourceAttributes) { + return fmt.Errorf("'ignoreResourceAttributes' can only be set when 'promoteAllResourceAttributes' is true") + } + + return nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go new file mode 100644 index 0000000000..439b57c53f --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go @@ -0,0 +1,170 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + PrometheusRuleKind = "PrometheusRule" + PrometheusRuleName = "prometheusrules" + PrometheusRuleKindKey = "prometheusrule" +) + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="promrule" +// +kubebuilder:subresource:status + +// The `PrometheusRule` custom resource definition (CRD) defines [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) and [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) rules to be evaluated by `Prometheus` or `ThanosRuler` objects. +// +// `Prometheus` and `ThanosRuler` objects select `PrometheusRule` objects using label and namespace selectors. +type PrometheusRule struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of desired alerting rule definitions for Prometheus. + // +required + Spec PrometheusRuleSpec `json:"spec"` + // status defines the status subresource. It is under active development and is updated only when the + // "StatusForConfigurationResources" feature gate is enabled. + // + // Most recent observed status of the PrometheusRule. Read-only. + // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ConfigResourceStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements the runtime.Object interface. 
+func (f *PrometheusRule) DeepCopyObject() runtime.Object { + return f.DeepCopy() +} + +func (f *PrometheusRule) Bindings() []WorkloadBinding { + return f.Status.Bindings +} + +// PrometheusRuleSpec contains specification parameters for a Rule. +// +k8s:openapi-gen=true +type PrometheusRuleSpec struct { + // groups defines the content of Prometheus rule file + // +listType=map + // +listMapKey=name + // +optional + Groups []RuleGroup `json:"groups,omitempty"` +} + +// RuleGroup and Rule are copied instead of vendored because the +// upstream Prometheus struct definitions don't have json struct tags. + +// RuleGroup is a list of sequentially evaluated recording and alerting rules. +// +k8s:openapi-gen=true +type RuleGroup struct { + // name defines the name of the rule group. + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + // labels define the labels to add or overwrite before storing the result for its rules. + // The labels defined at the rule level take precedence. + // + // It requires Prometheus >= 3.0.0. + // The field is ignored for Thanos Ruler. + // +optional + //nolint:kubeapilinter + Labels map[string]string `json:"labels,omitempty"` + // interval defines how often rules in the group are evaluated. + // +optional + Interval *Duration `json:"interval,omitempty"` + // query_offset defines the offset the rule evaluation timestamp of this particular group by the specified duration into the past. + // + // It requires Prometheus >= v2.53.0. + // It is not supported for ThanosRuler. + // +optional + //nolint:kubeapilinter // The json tag doesn't meet the conventions to be compatible with Prometheus format. + QueryOffset *Duration `json:"query_offset,omitempty"` + // rules defines the list of alerting and recording rules. + // +optional + Rules []Rule `json:"rules,omitempty"` + // partial_response_strategy is only used by ThanosRuler and will + // be ignored by Prometheus instances. + // More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response + // +kubebuilder:validation:Pattern="^(?i)(abort|warn)?$" + // +optional + //nolint:kubeapilinter // The json tag doesn't meet the conventions to be compatible with Prometheus format. + PartialResponseStrategy string `json:"partial_response_strategy,omitempty"` + // limit defines the number of alerts an alerting rule and series a recording + // rule can produce. + // Limit is supported starting with Prometheus >= 2.31 and Thanos Ruler >= 0.24. + // +optional + Limit *int `json:"limit,omitempty"` +} + +// Rule describes an alerting or recording rule +// See Prometheus documentation: [alerting](https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) or [recording](https://www.prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) rule +// +k8s:openapi-gen=true +// +kubebuilder:validation:OneOf=Record,Alert +type Rule struct { + // record defines the name of the time series to output to. Must be a valid metric name. + // Only one of `record` and `alert` must be set. + // +optional + Record string `json:"record,omitempty"` + // alert defines the name of the alert. Must be a valid label value. + // Only one of `record` and `alert` must be set. + // +optional + Alert string `json:"alert,omitempty"` + // expr defines the PromQL expression to evaluate. + // +required + Expr intstr.IntOrString `json:"expr"` + // for defines how alerts are considered firing once they have been returned for this long. 
+ // +optional + For *Duration `json:"for,omitempty"` + // keep_firing_for defines how long an alert will continue firing after the condition that triggered it has cleared. + // +optional + //nolint:kubeapilinter // The json tag doesn't meet the conventions to be compatible with Prometheus format. + KeepFiringFor *NonEmptyDuration `json:"keep_firing_for,omitempty"` + // labels defines labels to add or overwrite. + // +optional + //nolint:kubeapilinter + Labels map[string]string `json:"labels,omitempty"` + // annotations defines annotations to add to each alert. + // Only valid for alerting rules. + // +optional + //nolint:kubeapilinter + Annotations map[string]string `json:"annotations,omitempty"` +} + +// PrometheusRuleList is a list of PrometheusRules. +// +k8s:openapi-gen=true +type PrometheusRuleList struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // List of Rules + // +required + Items []PrometheusRule `json:"items"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *PrometheusRuleList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/register.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/register.go new file mode 100644 index 0000000000..37786147ab --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/register.go @@ -0,0 +1,67 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" +) + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: monitoring.GroupName, Version: Version} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Prometheus{}, + &PrometheusList{}, + &ServiceMonitor{}, + &ServiceMonitorList{}, + &PodMonitor{}, + &PodMonitorList{}, + &Probe{}, + &ProbeList{}, + &Alertmanager{}, + &AlertmanagerList{}, + &PrometheusRule{}, + &PrometheusRuleList{}, + &ThanosRuler{}, + &ThanosRulerList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go new file mode 100644 index 0000000000..90bcd79974 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go @@ -0,0 +1,228 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + ServiceMonitorsKind = "ServiceMonitor" + ServiceMonitorName = "servicemonitors" + ServiceMonitorKindKey = "servicemonitor" +) + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="smon" +// +kubebuilder:subresource:status + +// The `ServiceMonitor` custom resource definition (CRD) defines how `Prometheus` and `PrometheusAgent` can scrape metrics from a group of services. +// Among other things, it allows to specify: +// * The services to scrape via label selectors. +// * The container ports to scrape. +// * Authentication credentials to use. +// * Target and metric relabeling. +// +// `Prometheus` and `PrometheusAgent` objects select `ServiceMonitor` objects using label and namespace selectors. +type ServiceMonitor struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of desired Service selection for target discovery by + // Prometheus. + // +required + Spec ServiceMonitorSpec `json:"spec"` + // status defines the status subresource. It is under active development and is updated only when the + // "StatusForConfigurationResources" feature gate is enabled. + // + // Most recent observed status of the ServiceMonitor. Read-only. + // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ConfigResourceStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements the runtime.Object interface. 
+func (l *ServiceMonitor) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +func (l *ServiceMonitor) Bindings() []WorkloadBinding { + return l.Status.Bindings +} + +// ServiceMonitorSpec defines the specification parameters for a ServiceMonitor. +// +k8s:openapi-gen=true +type ServiceMonitorSpec struct { + // jobLabel selects the label from the associated Kubernetes `Service` + // object which will be used as the `job` label for all metrics. + // + // For example if `jobLabel` is set to `foo` and the Kubernetes `Service` + // object is labeled with `foo: bar`, then Prometheus adds the `job="bar"` + // label to all ingested metrics. + // + // If the value of this field is empty or if the label doesn't exist for + // the given Service, the `job` label of the metrics defaults to the name + // of the associated Kubernetes `Service`. + // +optional + JobLabel string `json:"jobLabel,omitempty"` + + // targetLabels defines the labels which are transferred from the + // associated Kubernetes `Service` object onto the ingested metrics. + // + // +optional + TargetLabels []string `json:"targetLabels,omitempty"` + // podTargetLabels defines the labels which are transferred from the + // associated Kubernetes `Pod` object onto the ingested metrics. + // + // +optional + PodTargetLabels []string `json:"podTargetLabels,omitempty"` + + // endpoints defines the list of endpoints part of this ServiceMonitor. + // Defines how to scrape metrics from Kubernetes [Endpoints](https://kubernetes.io/docs/concepts/services-networking/service/#endpoints) objects. + // In most cases, an Endpoints object is backed by a Kubernetes [Service](https://kubernetes.io/docs/concepts/services-networking/service/) object with the same name and labels. + // +required + Endpoints []Endpoint `json:"endpoints"` + + // selector defines the label selector to select the Kubernetes `Endpoints` objects to scrape metrics from. + // +required + Selector metav1.LabelSelector `json:"selector"` + + // selectorMechanism defines the mechanism used to select the endpoints to scrape. + // By default, the selection process relies on relabel configurations to filter the discovered targets. + // Alternatively, you can opt in for role selectors, which may offer better efficiency in large clusters. + // Which strategy is best for your use case needs to be carefully evaluated. + // + // It requires Prometheus >= v2.17.0. + // + // +optional + SelectorMechanism *SelectorMechanism `json:"selectorMechanism,omitempty"` + + // namespaceSelector defines in which namespace(s) Prometheus should discover the services. + // By default, the services are discovered in the same namespace as the `ServiceMonitor` object but it is possible to select pods across different/all namespaces. + // +optional + NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` + + // sampleLimit defines a per-scrape limit on the number of scraped samples + // that will be accepted. + // + // +optional + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + + // scrapeProtocols defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. 
+ // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + + // fallbackScrapeProtocol defines the protocol to use if a scrape returns blank, unparseable, or otherwise invalid Content-Type. + // + // It requires Prometheus >= v3.0.0. + // +optional + FallbackScrapeProtocol *ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + + // targetLimit defines a limit on the number of scraped targets that will + // be accepted. + // + // +optional + TargetLimit *uint64 `json:"targetLimit,omitempty"` + + // labelLimit defines the per-scrape limit on number of labels that will be accepted for a sample. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelLimit *uint64 `json:"labelLimit,omitempty"` + // labelNameLengthLimit defines the per-scrape limit on length of labels name that will be accepted for a sample. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + // labelValueLengthLimit defines the per-scrape limit on length of labels value that will be accepted for a sample. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + + // +optional + NativeHistogramConfig `json:",inline"` + + // keepDroppedTargets defines the per-scrape limit on the number of targets dropped by relabeling + // that will be kept in memory. 0 means no limit. + // + // It requires Prometheus >= v2.47.0. + // + // +optional + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // attachMetadata defines additional metadata which is added to the + // discovered targets. + // + // It requires Prometheus >= v2.37.0. + // + // +optional + AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` + + // scrapeClass defines the scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` + + // bodySizeLimit when defined, bodySizeLimit specifies a job level limit on the size + // of uncompressed response body that will be accepted by Prometheus. + // + // It requires Prometheus >= v2.28.0. + // + // +optional + BodySizeLimit *ByteSize `json:"bodySizeLimit,omitempty"` + + // serviceDiscoveryRole defines the service discovery role used to discover targets. + // + // If set, the value should be either "Endpoints" or "EndpointSlice". + // Otherwise it defaults to the value defined in the + // Prometheus/PrometheusAgent resource. + // + // +optional + ServiceDiscoveryRole *ServiceDiscoveryRole `json:"serviceDiscoveryRole,omitempty"` +} + +// ServiceMonitorList is a list of ServiceMonitors. +// +k8s:openapi-gen=true +type ServiceMonitorList struct { + // TypeMeta defines the versioned schema of this representation of an object + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // List of ServiceMonitors + // +required + Items []ServiceMonitor `json:"items"` +} + +// DeepCopyObject implements the runtime.Object interface. 
+func (l *ServiceMonitorList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go new file mode 100644 index 0000000000..49dd72f0bd --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go @@ -0,0 +1,611 @@ +// Copyright 2020 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + ThanosRulerKind = "ThanosRuler" + ThanosRulerName = "thanosrulers" + ThanosRulerKindKey = "thanosrulers" +) + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="ruler" +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of Thanos Ruler" +// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas",description="The number of desired replicas" +// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.availableReplicas",description="The number of ready replicas" +// +kubebuilder:printcolumn:name="Reconciled",type="string",JSONPath=".status.conditions[?(@.type == 'Reconciled')].status" +// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.conditions[?(@.type == 'Available')].status" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".status.paused",description="Whether the resource reconciliation is paused or not",priority=1 +// +kubebuilder:subresource:status + +// The `ThanosRuler` custom resource definition (CRD) defines a desired [Thanos Ruler](https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md) setup to run in a Kubernetes cluster. +// +// A `ThanosRuler` instance requires at least one compatible Prometheus API endpoint (either Thanos Querier or Prometheus services). +// +// The resource defines via label and namespace selectors which `PrometheusRule` objects should be associated to the deployed Thanos Ruler instances. +type ThanosRuler struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of the desired behavior of the ThanosRuler cluster. More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +required + Spec ThanosRulerSpec `json:"spec"` + // status defines the most recent observed status of the ThanosRuler cluster. 
Read-only.
+	// More info:
+	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status ThanosRulerStatus `json:"status,omitempty"`
+}
+
+// ThanosRulerList is a list of ThanosRulers.
+// +k8s:openapi-gen=true
+type ThanosRulerList struct {
+	// TypeMeta defines the versioned schema of this representation of an object.
+	metav1.TypeMeta `json:",inline"`
+	// metadata defines ListMeta as metadata for collection responses.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty"`
+	// List of ThanosRulers
+	// +required
+	Items []ThanosRuler `json:"items"`
+}
+
+// ThanosRulerSpec is a specification of the desired behavior of the ThanosRuler. More info:
+// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+// +k8s:openapi-gen=true
+type ThanosRulerSpec struct {
+	// version of Thanos to be deployed.
+	// +optional
+	Version *string `json:"version,omitempty"`
+
+	// podMetadata defines labels and annotations which are propagated to the ThanosRuler pods.
+	//
+	// The following items are reserved and cannot be overridden:
+	// * "app.kubernetes.io/name" label, set to "thanos-ruler".
+	// * "app.kubernetes.io/managed-by" label, set to "prometheus-operator".
+	// * "app.kubernetes.io/instance" label, set to the name of the ThanosRuler instance.
+	// * "thanos-ruler" label, set to the name of the ThanosRuler instance.
+	// * "kubectl.kubernetes.io/default-container" annotation, set to "thanos-ruler".
+	// +optional
+	PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"`
+
+	// image defines the Thanos container image URL.
+	// +optional
+	Image string `json:"image,omitempty"`
+
+	// imagePullPolicy defines the image pull policy for the 'thanos', 'init-config-reloader' and 'config-reloader' containers.
+	// See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy for more details.
+	// +kubebuilder:validation:Enum="";Always;Never;IfNotPresent
+	// +optional
+	ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"`
+
+	// imagePullSecrets defines an optional list of references to secrets in the same namespace
+	// to use for pulling thanos images from registries.
+	// See http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
+	// +optional
+	ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
+
+	// paused defines whether the ThanosRuler deployment is paused. When paused, no actions
+	// except for deletion will be performed on the underlying objects.
+	// +optional
+	Paused bool `json:"paused,omitempty"` // nolint:kubeapilinter
+
+	// replicas defines the number of thanos ruler instances to deploy.
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty"`
+
+	// nodeSelector defines which Nodes the Pods are scheduled on.
+	// +optional
+	//nolint:kubeapilinter
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// resources defines the resource requirements for single Pods.
+	// If not provided, no requests/limits will be set.
+	// +optional
+	Resources v1.ResourceRequirements `json:"resources,omitempty"`
+
+	// affinity defines the pod's scheduling constraints, if specified.
+	// +optional
+	Affinity *v1.Affinity `json:"affinity,omitempty"`
+
+	// tolerations defines the pod's tolerations, if specified.
+	// +optional
+	Tolerations []v1.Toleration `json:"tolerations,omitempty"`
+
+	// topologySpreadConstraints defines the pod's topology spread constraints.
+	// +optional
+	TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
+
+	// securityContext defines the pod-level security attributes and common container settings.
+	// This defaults to the default PodSecurityContext.
+	// +optional
+	SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
+
+	// dnsPolicy defines the DNS policy for the pods.
+	//
+	// +optional
+	DNSPolicy *DNSPolicy `json:"dnsPolicy,omitempty"`
+	// dnsConfig defines the DNS configuration for the pods.
+	//
+	// +optional
+	DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty"`
+
+	// enableServiceLinks defines whether information about services should be injected into the pod's environment variables.
+	// +optional
+	EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` // nolint:kubeapilinter
+
+	// priorityClassName defines the priority class assigned to the Pods.
+	// +optional
+	PriorityClassName string `json:"priorityClassName,omitempty"`
+
+	// serviceName defines the name of the Service used by the underlying StatefulSet(s) as the governing service.
+	// If defined, the Service must be created before the ThanosRuler resource in the same namespace and it must define a selector that matches the pod labels.
+	// If empty, the operator will create and manage a headless service named `thanos-ruler-operated` for ThanosRuler resources.
+	// When deploying multiple ThanosRuler resources in the same namespace, it is recommended to specify a different value for each.
+	// See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id for more details.
+	// +optional
+	// +kubebuilder:validation:MinLength=1
+	ServiceName *string `json:"serviceName,omitempty"`
+
+	// serviceAccountName defines the name of the ServiceAccount to use to run the
+	// Thanos Ruler Pods.
+	// +optional
+	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+	// storage defines the specification of how storage shall be used.
+	// +optional
+	Storage *StorageSpec `json:"storage,omitempty"`
+
+	// volumes allows the configuration of additional volumes on the output StatefulSet definition. Volumes specified will
+	// be appended to other volumes that are generated as a result of StorageSpec objects.
+	// +optional
+	Volumes []v1.Volume `json:"volumes,omitempty"`
+	// volumeMounts allows the configuration of additional VolumeMounts on the output StatefulSet definition.
+	// VolumeMounts specified will be appended to other VolumeMounts in the ruler container,
+	// that are generated as a result of StorageSpec objects.
+	// +optional
+	VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"`
+
+	// objectStorageConfig defines the object storage configuration. The configuration format is defined at https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage
+	//
+	// The operator performs no validation of the configuration.
+	//
+	// `objectStorageConfigFile` takes precedence over this field.
+	//
+	// +optional
+	ObjectStorageConfig *v1.SecretKeySelector `json:"objectStorageConfig,omitempty"`
+	// objectStorageConfigFile defines the path of the object storage configuration file.
+	//
+	// The configuration format is defined at https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage
+	//
+	// The operator performs no validation of the configuration file.
+	//
+	// This field takes precedence over `objectStorageConfig`.
+	//
+	// +optional
+	ObjectStorageConfigFile *string `json:"objectStorageConfigFile,omitempty"`
+
+	// listenLocal makes the Thanos Ruler listen on loopback, so that it
+	// does not bind against the Pod IP.
+	// +optional
+	ListenLocal bool `json:"listenLocal,omitempty"` // nolint:kubeapilinter
+
+	// podManagementPolicy defines the policy for creating/deleting pods when
+	// scaling up and down.
+	//
+	// Unlike the default StatefulSet behavior, the default policy is
+	// `Parallel` to avoid manual intervention in case a pod gets stuck during
+	// a rollout.
+	//
+	// Note that updating this value implies the recreation of the StatefulSet
+	// which incurs a service outage.
+	//
+	// +optional
+	PodManagementPolicy *PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
+
+	// updateStrategy indicates the strategy that will be employed to update
+	// Pods in the StatefulSet when a revision is made to the StatefulSet's Pod
+	// Template.
+	//
+	// The default strategy is RollingUpdate.
+	//
+	// +optional
+	UpdateStrategy *StatefulSetUpdateStrategy `json:"updateStrategy,omitempty"`
+
+	// queryEndpoints defines the list of Thanos Query endpoints from which to query metrics.
+	//
+	// For Thanos >= v0.11.0, it is recommended to use `queryConfig` instead.
+	//
+	// `queryConfig` takes precedence over this field.
+	//
+	// +optional
+	QueryEndpoints []string `json:"queryEndpoints,omitempty"`
+
+	// queryConfig defines the list of Thanos Query endpoints from which to query metrics.
+	//
+	// The configuration format is defined at https://thanos.io/tip/components/rule.md/#query-api
+	//
+	// It requires Thanos >= v0.11.0.
+	//
+	// The operator performs no validation of the configuration.
+	//
+	// This field takes precedence over `queryEndpoints`.
+	//
+	// +optional
+	QueryConfig *v1.SecretKeySelector `json:"queryConfig,omitempty"`
+
+	// alertmanagersUrl defines the list of Alertmanager endpoints to send alerts to.
+	//
+	// For Thanos >= v0.10.0, it is recommended to use `alertmanagersConfig` instead.
+	//
+	// `alertmanagersConfig` takes precedence over this field.
+	//
+	// +optional
+	AlertManagersURL []string `json:"alertmanagersUrl,omitempty"`
+	// alertmanagersConfig defines the list of Alertmanager endpoints to send alerts to.
+	//
+	// The configuration format is defined at https://thanos.io/tip/components/rule.md/#alertmanager.
+	//
+	// It requires Thanos >= v0.10.0.
+	//
+	// The operator performs no validation of the configuration.
+	//
+	// This field takes precedence over `alertmanagersUrl`.
+	//
+	// +optional
+	AlertManagersConfig *v1.SecretKeySelector `json:"alertmanagersConfig,omitempty"`
+
+	// ruleSelector defines the PrometheusRule objects to be selected for rule evaluation. An empty
+	// label selector matches all objects. A null label selector matches no
+	// objects.
+	//
+	// +optional
+	RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"`
+	// ruleNamespaceSelector defines the namespaces to be selected for Rules discovery. If unspecified,
+	// only the namespace of the ThanosRuler object is used.
+	//
+	// +optional
+	RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"`
+
+	// enforcedNamespaceLabel enforces adding a namespace label of origin for each alert
+	// and metric that is user created. The label value will always be the namespace of the object that is
+	// being created.
+ // +optional + EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"` + // excludedFromEnforcement defines the list of references to PrometheusRule objects + // to be excluded from enforcing a namespace label of origin. + // Applies only if enforcedNamespaceLabel set to true. + // +optional + ExcludedFromEnforcement []ObjectReference `json:"excludedFromEnforcement,omitempty"` + // prometheusRulesExcludedFromEnforce defines a list of Prometheus rules to be excluded from enforcing + // of adding namespace labels. Works only if enforcedNamespaceLabel set to true. + // Make sure both ruleNamespace and ruleName are set for each pair + // Deprecated: use excludedFromEnforcement instead. + // +optional + PrometheusRulesExcludedFromEnforce []PrometheusRuleExcludeConfig `json:"prometheusRulesExcludedFromEnforce,omitempty"` + + // logLevel for ThanosRuler to be configured with. + // +kubebuilder:validation:Enum="";debug;info;warn;error + // +optional + LogLevel string `json:"logLevel,omitempty"` + // logFormat for ThanosRuler to be configured with. + // +kubebuilder:validation:Enum="";logfmt;json + // +optional + LogFormat string `json:"logFormat,omitempty"` + + // portName defines the port name used for the pods and governing service. + // Defaults to `web`. + // +kubebuilder:default:="web" + // +optional + PortName string `json:"portName,omitempty"` + + // evaluationInterval defines the interval between consecutive evaluations. + // +kubebuilder:default:="15s" + // +optional + EvaluationInterval Duration `json:"evaluationInterval,omitempty"` + + // resendDelay defines the minimum amount of time to wait before resending an alert to Alertmanager. + // +optional + ResendDelay *Duration `json:"resendDelay,omitempty"` + + // ruleOutageTolerance defines the max time to tolerate prometheus outage for restoring "for" state of alert. + // It requires Thanos >= v0.30.0. + // +optional + RuleOutageTolerance *Duration `json:"ruleOutageTolerance,omitempty"` + + // ruleQueryOffset defines the default rule group's query offset duration to use. + // It requires Thanos >= v0.38.0. + // +optional + RuleQueryOffset *Duration `json:"ruleQueryOffset,omitempty"` + + // ruleConcurrentEval defines how many rules can be evaluated concurrently. + // It requires Thanos >= v0.37.0. + // +kubebuilder:validation:Minimum=1 + // + // +optional + RuleConcurrentEval *int32 `json:"ruleConcurrentEval,omitempty"` + + // ruleGracePeriod defines the minimum duration between alert and restored "for" state. + // This is maintained only for alerts with configured "for" time greater than grace period. + // It requires Thanos >= v0.30.0. + // + // +optional + RuleGracePeriod *Duration `json:"ruleGracePeriod,omitempty"` + + // retention defines the time duration ThanosRuler shall retain data for. Default is '24h', and + // must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds + // seconds minutes hours days weeks years). + // + // The field has no effect when remote-write is configured since the Ruler + // operates in stateless mode. + // + // +kubebuilder:default:="24h" + // +optional + Retention Duration `json:"retention,omitempty"` + + // containers allows injecting additional containers or modifying operator generated + // containers. This can be used to allow adding an authentication proxy to a ThanosRuler pod or + // to change the behavior of an operator generated container. 
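A hedged sketch of the rule-selection and evaluation fields above (all label values and durations are assumptions): rules labelled role=thanos-rules are picked up from namespaces labelled team=platform, evaluated every 30s, retained for 48h, and alerts are stamped with their origin namespace.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// ruleSelectionSketch combines ruleSelector, ruleNamespaceSelector and the evaluation
// settings defined in this struct.
func ruleSelectionSketch() monitoringv1.ThanosRulerSpec {
	return monitoringv1.ThanosRulerSpec{
		RuleSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"role": "thanos-rules"},
		},
		RuleNamespaceSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"team": "platform"},
		},
		EnforcedNamespaceLabel: "namespace",
		EvaluationInterval:     monitoringv1.Duration("30s"),
		Retention:              monitoringv1.Duration("48h"),
	}
}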
Containers described here modify + // an operator generated container if they share the same name and modifications are done via a + // strategic merge patch. The current container names are: `thanos-ruler` and `config-reloader`. + // Overriding containers is entirely outside the scope of what the maintainers will support and by doing + // so, you accept that this behaviour may break at any time without notice. + // +optional + Containers []v1.Container `json:"containers,omitempty"` + // initContainers allows adding initContainers to the pod definition. Those can be used to e.g. + // fetch secrets for injection into the ThanosRuler configuration from external sources. Any + // errors during the execution of an initContainer will lead to a restart of the Pod. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // Using initContainers for any use case other then secret fetching is entirely outside the scope + // of what the maintainers will support and by doing so, you accept that this behaviour may break + // at any time without notice. + // +optional + InitContainers []v1.Container `json:"initContainers,omitempty"` + + // tracingConfig defines the tracing configuration. + // + // The configuration format is defined at https://thanos.io/tip/thanos/tracing.md/#configuration + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // The operator performs no validation of the configuration. + // + // `tracingConfigFile` takes precedence over this field. + // + // +optional + TracingConfig *v1.SecretKeySelector `json:"tracingConfig,omitempty"` + // tracingConfigFile defines the path of the tracing configuration file. + // + // The configuration format is defined at https://thanos.io/tip/thanos/tracing.md/#configuration + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // The operator performs no validation of the configuration file. + // + // This field takes precedence over `tracingConfig`. + // + // +optional + TracingConfigFile string `json:"tracingConfigFile,omitempty"` + + // labels defines the external label pairs of the ThanosRuler resource. + // + // A default replica label `thanos_ruler_replica` will be always added as a + // label with the value of the pod's name. + // + // +optional + //nolint:kubeapilinter + Labels map[string]string `json:"labels,omitempty"` + + // alertDropLabels defines the label names which should be dropped in Thanos Ruler + // alerts. + // + // The replica label `thanos_ruler_replica` will always be dropped from the alerts. + // + // +optional + AlertDropLabels []string `json:"alertDropLabels,omitempty"` + + // externalPrefix defines the Thanos Ruler instances will be available under. This is + // necessary to generate correct URLs. This is necessary if Thanos Ruler is not + // served from root of a DNS name. + // +optional + ExternalPrefix string `json:"externalPrefix,omitempty"` + // routePrefix defines the route prefix ThanosRuler registers HTTP handlers for. This allows thanos UI to be served on a sub-path. + // +optional + RoutePrefix string `json:"routePrefix,omitempty"` + + // grpcServerTlsConfig defines the gRPC server from which Thanos Querier reads + // recorded rule data. + // Note: Currently only the CAFile, CertFile, and KeyFile fields are supported. + // Maps to the '--grpc-server-tls-*' CLI args. 
+ // +optional + GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"` + + // alertQueryUrl defines how Thanos Ruler will set in the 'Source' field + // of all alerts. + // Maps to the '--alert.query-url' CLI arg. + // +optional + AlertQueryURL string `json:"alertQueryUrl,omitempty"` + + // minReadySeconds defines the minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing for it to be considered available. + // + // If unset, pods will be considered available as soon as they are ready. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + + // alertRelabelConfigs defines the alert relabeling in Thanos Ruler. + // + // Alert relabel configuration must have the form as specified in the + // official Prometheus documentation: + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs + // + // The operator performs no validation of the configuration. + // + // `alertRelabelConfigFile` takes precedence over this field. + // + // +optional + AlertRelabelConfigs *v1.SecretKeySelector `json:"alertRelabelConfigs,omitempty"` + // alertRelabelConfigFile defines the path to the alert relabeling configuration file. + // + // Alert relabel configuration must have the form as specified in the + // official Prometheus documentation: + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs + // + // The operator performs no validation of the configuration file. + // + // This field takes precedence over `alertRelabelConfig`. + // + // +optional + AlertRelabelConfigFile *string `json:"alertRelabelConfigFile,omitempty"` + + // hostAliases defines pods' hostAliases configuration + // +listType=map + // +listMapKey=ip + // +optional + HostAliases []HostAlias `json:"hostAliases,omitempty"` + + // additionalArgs defines how to add additional arguments for the ThanosRuler container. + // It is intended for e.g. activating hidden flags which are not supported by + // the dedicated configuration options yet. The arguments are passed as-is to the + // ThanosRuler container which may cause issues if they are invalid or not supported + // by the given ThanosRuler version. + // In case of an argument conflict (e.g. an argument which is already set by the + // operator itself) or when providing an invalid argument the reconciliation will + // fail and an error will be logged. + // +optional + AdditionalArgs []Argument `json:"additionalArgs,omitempty"` + + // web defines the configuration of the ThanosRuler web server. + // +optional + Web *ThanosRulerWebSpec `json:"web,omitempty"` + + // remoteWrite defines the list of remote write configurations. + // + // When the list isn't empty, the ruler is configured with stateless mode. + // + // It requires Thanos >= 0.24.0. + // + // +optional + RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"` + + // terminationGracePeriodSeconds defines the optional duration in seconds the pod needs to terminate gracefully. + // Value must be non-negative integer. The value zero indicates stop immediately via + // the kill signal (no opportunity to shut down) which may lead to data corruption. + // + // Defaults to 120 seconds. + // + // +kubebuilder:validation:Minimum:=0 + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + + // enableFeatures defines how to setup Thanos Ruler feature flags. 
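A short sketch of the availability and pass-through argument fields above; the flag name is purely illustrative and not guaranteed to exist in any Thanos release.

package example

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// availabilityAndArgsSketch delays availability until 30 seconds of readiness and
// forwards one extra argument verbatim; as documented above, a conflicting or invalid
// argument makes reconciliation fail.
func availabilityAndArgsSketch() monitoringv1.ThanosRulerSpec {
	minReady := int32(30)
	return monitoringv1.ThanosRulerSpec{
		MinReadySeconds: &minReady,
		AdditionalArgs: []monitoringv1.Argument{
			// Illustrative flag only; the operator passes it through as-is.
			{Name: "rule.query.http-method", Value: "POST"},
		},
	}
}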
By default, no features are enabled. + // + // Enabling features which are disabled by default is entirely outside the + // scope of what the maintainers will support and by doing so, you accept + // that this behaviour may break at any time without notice. + // + // For more information see https://thanos.io/tip/components/rule.md/ + // + // It requires Thanos >= 0.39.0. + // +listType:=set + // +optional + EnableFeatures []EnableFeature `json:"enableFeatures,omitempty"` + + // hostUsers supports the user space in Kubernetes. + // + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/user-namespaces/ + // + // + // The feature requires at least Kubernetes 1.28 with the `UserNamespacesSupport` feature gate enabled. + // Starting Kubernetes 1.33, the feature is enabled by default. + // + // +optional + HostUsers *bool `json:"hostUsers,omitempty"` // nolint:kubeapilinter +} + +// ThanosRulerWebSpec defines the configuration of the ThanosRuler web server. +// +k8s:openapi-gen=true +type ThanosRulerWebSpec struct { + // +optional + WebConfigFileFields `json:",inline"` +} + +// ThanosRulerStatus is the most recent observed status of the ThanosRuler. Read-only. +// More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type ThanosRulerStatus struct { + // paused defines whether any actions on the underlying managed objects are + // being performed. Only delete actions will be performed. + // +optional + Paused bool `json:"paused"` // nolint:kubeapilinter + // replicas defines the total number of non-terminated pods targeted by this ThanosRuler deployment + // (their labels match the selector). + // +optional + Replicas int32 `json:"replicas"` + // updatedReplicas defines the total number of non-terminated pods targeted by this ThanosRuler deployment + // that have the desired version spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas"` + // availableReplicas defines the total number of available pods (ready for at least minReadySeconds) + // targeted by this ThanosRuler deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas"` + // unavailableReplicas defines the total number of unavailable pods targeted by this ThanosRuler deployment. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas"` + // conditions defines the current state of the ThanosRuler object. + // +listType=map + // +listMapKey=type + // +optional + Conditions []Condition `json:"conditions,omitempty"` +} + +func (tr *ThanosRuler) ExpectedReplicas() int { + if tr.Spec.Replicas == nil { + return 1 + } + return int(*tr.Spec.Replicas) +} + +func (tr *ThanosRuler) SetReplicas(i int) { tr.Status.Replicas = int32(i) } +func (tr *ThanosRuler) SetUpdatedReplicas(i int) { tr.Status.UpdatedReplicas = int32(i) } +func (tr *ThanosRuler) SetAvailableReplicas(i int) { tr.Status.AvailableReplicas = int32(i) } +func (tr *ThanosRuler) SetUnavailableReplicas(i int) { tr.Status.UnavailableReplicas = int32(i) } + +// DeepCopyObject implements the runtime.Object interface. +func (l *ThanosRuler) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. 
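A small sketch of the defaulting behaviour of ExpectedReplicas shown above, assuming spec.replicas is the *int32 field defined earlier in this file (as in upstream prometheus-operator).

package example

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// expectedReplicasSketch: a nil spec.replicas counts as a single expected replica,
// otherwise the dereferenced value is returned.
func expectedReplicasSketch() (int, int) {
	var unset monitoringv1.ThanosRuler // spec.replicas left nil

	two := int32(2)
	var set monitoringv1.ThanosRuler
	set.Spec.Replicas = &two

	return unset.ExpectedReplicas(), set.ExpectedReplicas() // 1, 2
}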
+func (l *ThanosRulerList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/tls_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/tls_types.go new file mode 100644 index 0000000000..51e6835279 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/tls_types.go @@ -0,0 +1,173 @@ +// Copyright 2025 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "fmt" + "reflect" + "strings" + + v1 "k8s.io/api/core/v1" +) + +// +kubebuilder:validation:Enum=TLS10;TLS11;TLS12;TLS13 +type TLSVersion string + +const ( + TLSVersion10 TLSVersion = "TLS10" + TLSVersion11 TLSVersion = "TLS11" + TLSVersion12 TLSVersion = "TLS12" + TLSVersion13 TLSVersion = "TLS13" +) + +// TLSConfig defines full TLS configuration. +type TLSConfig struct { + SafeTLSConfig `json:",inline"` + TLSFilesConfig `json:",inline"` +} + +// Validate semantically validates the given TLSConfig. +func (c *TLSConfig) Validate() error { + if c == nil { + return nil + } + + if !reflect.ValueOf(c.CA).IsZero() { + if c.CAFile != "" { + return fmt.Errorf("cannot specify both 'caFile' and 'ca'") + } + + if err := c.CA.Validate(); err != nil { + return fmt.Errorf("ca: %w", err) + } + } + + hasCert := !reflect.ValueOf(c.Cert).IsZero() + if hasCert { + if c.CertFile != "" { + return fmt.Errorf("cannot specify both 'certFile' and 'cert'") + } + + if err := c.Cert.Validate(); err != nil { + return fmt.Errorf("cert: %w", err) + } + } + + if c.KeyFile != "" && c.KeySecret != nil { + return fmt.Errorf("cannot specify both 'keyFile' and 'keySecret'") + } + + hasCert = hasCert || c.CertFile != "" + hasKey := c.KeyFile != "" || c.KeySecret != nil + + if hasCert && !hasKey { + return fmt.Errorf("cannot specify client cert without client key") + } + + if hasKey && !hasCert { + return fmt.Errorf("cannot specify client key without client cert") + } + + if c.MaxVersion != nil && c.MinVersion != nil && strings.Compare(string(*c.MaxVersion), string(*c.MinVersion)) == -1 { + return fmt.Errorf("'maxVersion' must greater than or equal to 'minVersion'") + } + + return nil +} + +// SafeTLSConfig defines safe TLS configurations. +// +k8s:openapi-gen=true +type SafeTLSConfig struct { + // ca defines the Certificate authority used when verifying server certificates. + // +optional + CA SecretOrConfigMap `json:"ca,omitempty"` + + // cert defines the Client certificate to present when doing client-authentication. + // +optional + Cert SecretOrConfigMap `json:"cert,omitempty"` + + // keySecret defines the Secret containing the client key file for the targets. + // +optional + KeySecret *v1.SecretKeySelector `json:"keySecret,omitempty"` + + // serverName is used to verify the hostname for the targets. 
+ // +optional + ServerName *string `json:"serverName,omitempty"` + + // insecureSkipVerify defines how to disable target certificate validation. + // +optional + InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"` // nolint:kubeapilinter + + // minVersion defines the minimum acceptable TLS version. + // + // It requires Prometheus >= v2.35.0 or Thanos >= v0.28.0. + // +optional + MinVersion *TLSVersion `json:"minVersion,omitempty"` + + // maxVersion defines the maximum acceptable TLS version. + // + // It requires Prometheus >= v2.41.0 or Thanos >= v0.31.0. + // +optional + MaxVersion *TLSVersion `json:"maxVersion,omitempty"` +} + +// Validate semantically validates the given SafeTLSConfig. +func (c *SafeTLSConfig) Validate() error { + if c == nil { + return nil + } + + if c.CA != (SecretOrConfigMap{}) { + if err := c.CA.Validate(); err != nil { + return fmt.Errorf("ca %s: %w", c.CA.String(), err) + } + } + + if c.Cert != (SecretOrConfigMap{}) { + if err := c.Cert.Validate(); err != nil { + return fmt.Errorf("cert %s: %w", c.Cert.String(), err) + } + } + + if c.Cert != (SecretOrConfigMap{}) && c.KeySecret == nil { + return fmt.Errorf("client cert specified without client key") + } + + if c.KeySecret != nil && c.Cert == (SecretOrConfigMap{}) { + return fmt.Errorf("client key specified without client cert") + } + + if c.MaxVersion != nil && c.MinVersion != nil && strings.Compare(string(*c.MaxVersion), string(*c.MinVersion)) == -1 { + return fmt.Errorf("maxVersion must more than or equal to minVersion") + } + + return nil +} + +// TLSFilesConfig extends the TLS configuration with file parameters. +// +k8s:openapi-gen=true +type TLSFilesConfig struct { + // caFile defines the path to the CA cert in the Prometheus container to use for the targets. + // +optional + CAFile string `json:"caFile,omitempty"` + // certFile defines the path to the client cert file in the Prometheus container for the targets. + // +optional + CertFile string `json:"certFile,omitempty"` + // keyFile defines the path to the client key file in the Prometheus container for the targets. + // +optional + KeyFile string `json:"keyFile,omitempty"` +} + +// diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go new file mode 100644 index 0000000000..2f1cd1f1e6 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go @@ -0,0 +1,1092 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
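For illustration (Secret name and file paths are assumptions), the TLSConfig.Validate rules above in practice: a certificate file paired with a key Secret passes, while a client certificate without any key is rejected.

package example

import (
	corev1 "k8s.io/api/core/v1"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// tlsValidateSketch exercises the cert/key pairing checks of TLSConfig.Validate.
func tlsValidateSketch() (error, error) {
	valid := monitoringv1.TLSConfig{
		TLSFilesConfig: monitoringv1.TLSFilesConfig{
			CAFile:   "/etc/ssl/certs/ca.crt",
			CertFile: "/etc/ssl/certs/client.crt",
		},
		SafeTLSConfig: monitoringv1.SafeTLSConfig{
			KeySecret: &corev1.SecretKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{Name: "client-tls"},
				Key:                  "tls.key",
			},
		},
	}

	invalid := monitoringv1.TLSConfig{
		TLSFilesConfig: monitoringv1.TLSFilesConfig{CertFile: "/etc/ssl/certs/client.crt"},
	}

	// nil, then "cannot specify client cert without client key".
	return valid.Validate(), invalid.Validate()
}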
+ +package v1 + +import ( + "errors" + "fmt" + "net/url" + "reflect" + "strings" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" +) + +const ( + Version = "v1" +) + +// ByteSize is a valid memory size type based on powers-of-2, so 1KB is 1024B. +// Supported units: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB, EB, EiB Ex: `512MB`. +// +kubebuilder:validation:Pattern:="(^0|([0-9]*[.])?[0-9]+((K|M|G|T|E|P)i?)?B)$" +type ByteSize string + +func (bs *ByteSize) IsEmpty() bool { + return bs == nil || *bs == "" +} + +// Duration is a valid time duration that can be parsed by Prometheus model.ParseDuration() function. +// Supported units: y, w, d, h, m, s, ms +// Examples: `30s`, `1m`, `1h20m15s`, `15d` +// +kubebuilder:validation:Pattern:="^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$" +type Duration string + +// DurationPointer is a helper function to parse a Duration string into a *Duration. +func DurationPointer(s string) *Duration { + d := Duration(s) + return &d +} + +// NonEmptyDuration is a valid time duration that can be parsed by Prometheus model.ParseDuration() function. +// Compared to Duration, NonEmptyDuration enforces a minimum length of 1. +// Supported units: y, w, d, h, m, s, ms +// Examples: `30s`, `1m`, `1h20m15s`, `15d` +// +kubebuilder:validation:Pattern:="^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$" +// +kubebuilder:validation:MinLength=1 +type NonEmptyDuration string + +// GoDuration is a valid time duration that can be parsed by Go's time.ParseDuration() function. +// Supported units: h, m, s, ms +// Examples: `45ms`, `30s`, `1m`, `1h20m15s` +// +kubebuilder:validation:Pattern:="^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$" +type GoDuration string + +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. +type HostAlias struct { + // ip defines the IP address of the host file entry. + // +required + IP string `json:"ip"` + // hostnames defines hostnames for the above IP address. + // +required + Hostnames []string `json:"hostnames"` +} + +// PrometheusRuleExcludeConfig enables users to configure excluded +// PrometheusRule names and their namespaces to be ignored while enforcing +// namespace label for alerts and metrics. +type PrometheusRuleExcludeConfig struct { + // ruleNamespace defines the namespace of the excluded PrometheusRule object. + // +required + RuleNamespace string `json:"ruleNamespace"` + // ruleName defines the name of the excluded PrometheusRule object. + // +required + RuleName string `json:"ruleName"` +} + +type ProxyConfig struct { + // proxyUrl defines the HTTP proxy server to use. + // + // +kubebuilder:validation:Pattern:="^(http|https|socks5)://.+$" + // +optional + ProxyURL *string `json:"proxyUrl,omitempty"` + // noProxy defines a comma-separated string that can contain IPs, CIDR notation, domain names + // that should be excluded from proxying. IP and domain names can + // contain port numbers. + // + // It requires Prometheus >= v2.43.0, Alertmanager >= v0.25.0 or Thanos >= v0.32.0. 
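A brief sketch of the Duration and HostAlias types defined above as they appear in the ThanosRuler spec from the other new file (all concrete values are assumptions).

package example

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// durationAndHostAliasSketch uses a Duration literal for value fields, DurationPointer
// for optional pointer fields, and adds one static hosts-file entry to the pods.
func durationAndHostAliasSketch() monitoringv1.ThanosRulerSpec {
	return monitoringv1.ThanosRulerSpec{
		EvaluationInterval: monitoringv1.Duration("1m"),
		ResendDelay:        monitoringv1.DurationPointer("2m"),
		HostAliases: []monitoringv1.HostAlias{
			{IP: "10.0.0.10", Hostnames: []string{"alertmanager.internal"}},
		},
	}
}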
+ // +optional + NoProxy *string `json:"noProxy,omitempty"` + // proxyFromEnvironment defines whether to use the proxy configuration defined by environment variables (HTTP_PROXY, HTTPS_PROXY, and NO_PROXY). + // + // It requires Prometheus >= v2.43.0, Alertmanager >= v0.25.0 or Thanos >= v0.32.0. + // +optional + ProxyFromEnvironment *bool `json:"proxyFromEnvironment,omitempty"` // nolint:kubeapilinter + // proxyConnectHeader optionally specifies headers to send to + // proxies during CONNECT requests. + // + // It requires Prometheus >= v2.43.0, Alertmanager >= v0.25.0 or Thanos >= v0.32.0. + // +optional + // +mapType:=atomic + ProxyConnectHeader map[string][]v1.SecretKeySelector `json:"proxyConnectHeader,omitempty"` //nolint:kubeapilinter +} + +// Validate semantically validates the given ProxyConfig. +func (pc *ProxyConfig) Validate() error { + if pc == nil { + return nil + } + + if reflect.ValueOf(pc).IsZero() { + return nil + } + + proxyFromEnvironmentDefined := pc.ProxyFromEnvironment != nil && *pc.ProxyFromEnvironment + proxyURLDefined := pc.ProxyURL != nil && *pc.ProxyURL != "" + noProxyDefined := pc.NoProxy != nil && *pc.NoProxy != "" + + if len(pc.ProxyConnectHeader) > 0 && (!proxyFromEnvironmentDefined && !proxyURLDefined) { + return fmt.Errorf("if proxyConnectHeader is configured, proxyUrl or proxyFromEnvironment must also be configured") + } + + if proxyFromEnvironmentDefined && proxyURLDefined { + return fmt.Errorf("if proxyFromEnvironment is configured, proxyUrl must not be configured") + } + + if proxyFromEnvironmentDefined && noProxyDefined { + return fmt.Errorf("if proxyFromEnvironment is configured, noProxy must not be configured") + } + + if !proxyURLDefined && noProxyDefined { + return fmt.Errorf("if noProxy is configured, proxyUrl must also be configured") + } + + for k, v := range pc.ProxyConnectHeader { + if len(v) == 0 { + return fmt.Errorf("proxyConnetHeader[%s]: selector must not be empty", k) + } + for i, sel := range v { + if sel == (v1.SecretKeySelector{}) { + return fmt.Errorf("proxyConnectHeader[%s][%d]: selector must be defined", k, i) + } + } + } + + if pc.ProxyURL != nil { + if _, err := url.Parse(*pc.ProxyURL); err != nil { + return err + } + } + return nil +} + +// ObjectReference references a PodMonitor, ServiceMonitor, Probe or PrometheusRule object. +type ObjectReference struct { + // group of the referent. When not specified, it defaults to `monitoring.coreos.com` + // +optional + // +kubebuilder:default:="monitoring.coreos.com" + // +kubebuilder:validation:Enum=monitoring.coreos.com + Group string `json:"group"` + // resource of the referent. + // +required + // +kubebuilder:validation:Enum=prometheusrules;servicemonitors;podmonitors;probes;scrapeconfigs + Resource string `json:"resource"` + // namespace of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + // +required + // +kubebuilder:validation:MinLength=1 + Namespace string `json:"namespace"` + // name of the referent. When not set, all resources in the namespace are matched. + // +optional + Name string `json:"name,omitempty"` +} + +func (obj *ObjectReference) GroupResource() schema.GroupResource { + return schema.GroupResource{ + Resource: obj.Resource, + Group: obj.getGroup(), + } +} + +func (obj *ObjectReference) GroupKind() schema.GroupKind { + return schema.GroupKind{ + Kind: monitoring.ResourceToKind(obj.Resource), + Group: obj.getGroup(), + } +} + +// getGroup returns the group of the object. 
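A hedged sketch of the ProxyConfig.Validate rules above (proxy URL and noProxy values are assumptions): noProxy is only accepted together with proxyUrl.

package example

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// proxyValidateSketch shows one accepted and one rejected combination.
func proxyValidateSketch() (error, error) {
	proxyURL := "http://proxy.internal:3128"
	noProxy := "10.0.0.0/8,.cluster.local"

	valid := monitoringv1.ProxyConfig{ProxyURL: &proxyURL, NoProxy: &noProxy}
	invalid := monitoringv1.ProxyConfig{NoProxy: &noProxy}

	// nil, then "if noProxy is configured, proxyUrl must also be configured".
	return valid.Validate(), invalid.Validate()
}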
+// It is mostly needed for tests which don't create objects through the API and don't benefit from the default value. +func (obj *ObjectReference) getGroup() string { + if obj.Group == "" { + return monitoring.GroupName + } + return obj.Group +} + +// ArbitraryFSAccessThroughSMsConfig enables users to configure, whether +// a service monitor selected by the Prometheus instance is allowed to use +// arbitrary files on the file system of the Prometheus container. This is the case +// when e.g. a service monitor specifies a BearerTokenFile in an endpoint. A +// malicious user could create a service monitor selecting arbitrary secret files +// in the Prometheus container. Those secrets would then be sent with a scrape +// request by Prometheus to a malicious target. Denying the above would prevent the +// attack, users can instead use the BearerTokenSecret field. +type ArbitraryFSAccessThroughSMsConfig struct { + // deny prevents service monitors from accessing arbitrary files on the file system. + // When true, service monitors cannot use file-based configurations like BearerTokenFile + // that could potentially access sensitive files. When false (default), such access is allowed. + // Setting this to true enhances security by preventing potential credential theft attacks. + // + // +optional + Deny bool `json:"deny,omitempty"` // nolint:kubeapilinter +} + +// Condition represents the state of the resources associated with the +// Prometheus, Alertmanager or ThanosRuler resource. +// +k8s:deepcopy-gen=true +type Condition struct { + // type of the condition being reported. + // +required + Type ConditionType `json:"type"` + // status of the condition. + // +required + Status ConditionStatus `json:"status"` + // lastTransitionTime is the time of the last update to the current status property. + // +required + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + // reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // message defines human-readable message indicating details for the condition's last transition. + // +optional + Message string `json:"message,omitempty"` + // observedGeneration defines the .metadata.generation that the + // condition was set based upon. For instance, if `.metadata.generation` is + // currently 12, but the `.status.conditions[].observedGeneration` is 9, the + // condition is out of date with respect to the current state of the + // instance. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +kubebuilder:validation:MinLength=1 +type ConditionType string + +const ( + // Available indicates whether enough pods are ready to provide the + // service. + // The possible status values for this condition type are: + // - True: all pods are running and ready, the service is fully available. + // - Degraded: some pods aren't ready, the service is partially available. + // - False: no pods are running, the service is totally unavailable. + // - Unknown: the operator couldn't determine the condition status. + Available ConditionType = "Available" + // Reconciled indicates whether the operator has reconciled the state of + // the underlying resources with the object's spec. + // The possible status values for this condition type are: + // - True: the reconciliation was successful. + // - False: the reconciliation failed. + // - Unknown: the operator couldn't determine the condition status. 
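A small sketch of the group defaulting described above (namespace name is an assumption): an ObjectReference with an empty group resolves to the monitoring.coreos.com group, and an empty name matches all objects in the namespace.

package example

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// objectReferenceSketch builds a reference to every PrometheusRule in one namespace.
func objectReferenceSketch() string {
	ref := monitoringv1.ObjectReference{
		Resource:  "prometheusrules",
		Namespace: "openshift-monitoring",
	}
	return ref.GroupResource().String() // "prometheusrules.monitoring.coreos.com"
}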
+ Reconciled ConditionType = "Reconciled" + // Accepted indicates whether the workload controller has successfully accepted + // the configuration resource and updated the configuration of the workload accordingly. + // The possible status values for this condition type are: + // - True: the configuration resource was successfully accepted by the controller and written to the configuration secret. + // - False: the controller rejected the configuration due to an error. + // - Unknown: the operator couldn't determine the condition status. + Accepted ConditionType = "Accepted" +) + +// +kubebuilder:validation:MinLength=1 +type ConditionStatus string + +const ( + ConditionTrue ConditionStatus = "True" + ConditionDegraded ConditionStatus = "Degraded" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// EmbeddedPersistentVolumeClaim is an embedded version of k8s.io/api/core/v1.PersistentVolumeClaim. +// It contains TypeMeta and a reduced ObjectMeta. +type EmbeddedPersistentVolumeClaim struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines EmbeddedMetadata contains metadata relevant to an EmbeddedResource. + // +optional + EmbeddedObjectMetadata `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec defines the specification of the characteristics of a volume requested by a pod author. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + Spec v1.PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // status is deprecated: this field is never set. + // +optional + Status v1.PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// EmbeddedObjectMetadata contains a subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta +// Only fields which are relevant to embedded resources are included. +type EmbeddedObjectMetadata struct { + // name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/ + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // labels define the map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // +optional + //nolint:kubeapilinter + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // annotations defines an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + // +optional + //nolint:kubeapilinter + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` +} + +// WebConfigFileFields defines the file content for --web.config.file flag. 
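For illustration (the reason string is an assumption), a Condition recording a successful reconciliation; observedGeneration lets consumers detect stale conditions as described above.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// reconciledConditionSketch returns a "Reconciled=True" condition for the given generation.
func reconciledConditionSketch(generation int64) monitoringv1.Condition {
	return monitoringv1.Condition{
		Type:               monitoringv1.Reconciled,
		Status:             monitoringv1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
		Reason:             "ReconciliationSucceeded",
		ObservedGeneration: generation,
	}
}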
+// +k8s:deepcopy-gen=true +type WebConfigFileFields struct { + // tlsConfig defines the TLS parameters for HTTPS. + // +optional + TLSConfig *WebTLSConfig `json:"tlsConfig,omitempty"` + // httpConfig defines HTTP parameters for web server. + // +optional + HTTPConfig *WebHTTPConfig `json:"httpConfig,omitempty"` +} + +// WebHTTPConfig defines HTTP parameters for web server. +// +k8s:openapi-gen=true +type WebHTTPConfig struct { + // http2 enable HTTP/2 support. Note that HTTP/2 is only supported with TLS. + // When TLSConfig is not configured, HTTP/2 will be disabled. + // Whenever the value of the field changes, a rolling update will be triggered. + // +optional + HTTP2 *bool `json:"http2,omitempty"` // nolint:kubeapilinter + // headers defines a list of headers that can be added to HTTP responses. + // +optional + Headers *WebHTTPHeaders `json:"headers,omitempty"` +} + +// WebHTTPHeaders defines the list of headers that can be added to HTTP responses. +// +k8s:openapi-gen=true +type WebHTTPHeaders struct { + // contentSecurityPolicy defines the Content-Security-Policy header to HTTP responses. + // Unset if blank. + // +optional + ContentSecurityPolicy string `json:"contentSecurityPolicy,omitempty"` + // xFrameOptions defines the X-Frame-Options header to HTTP responses. + // Unset if blank. Accepted values are deny and sameorigin. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options + // +kubebuilder:validation:Enum="";Deny;SameOrigin + // +optional + XFrameOptions string `json:"xFrameOptions,omitempty"` + // xContentTypeOptions defines the X-Content-Type-Options header to HTTP responses. + // Unset if blank. Accepted value is nosniff. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options + // +kubebuilder:validation:Enum="";NoSniff + // +optional + XContentTypeOptions string `json:"xContentTypeOptions,omitempty"` + // xXSSProtection defines the X-XSS-Protection header to all responses. + // Unset if blank. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection + // +optional + XXSSProtection string `json:"xXSSProtection,omitempty"` + // strictTransportSecurity defines the Strict-Transport-Security header to HTTP responses. + // Unset if blank. + // Please make sure that you use this with care as this header might force + // browsers to load Prometheus and the other applications hosted on the same + // domain and subdomains over HTTPS. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security + // +optional + StrictTransportSecurity string `json:"strictTransportSecurity,omitempty"` +} + +// WebTLSConfig defines the TLS parameters for HTTPS. +// +k8s:openapi-gen=true +type WebTLSConfig struct { + // cert defines the Secret or ConfigMap containing the TLS certificate for the web server. + // + // Either `keySecret` or `keyFile` must be defined. + // + // It is mutually exclusive with `certFile`. + // + // +optional + Cert SecretOrConfigMap `json:"cert,omitempty"` + // certFile defines the path to the TLS certificate file in the container for the web server. + // + // Either `keySecret` or `keyFile` must be defined. + // + // It is mutually exclusive with `cert`. + // + // +optional + CertFile *string `json:"certFile,omitempty"` + + // keySecret defines the secret containing the TLS private key for the web server. + // + // Either `cert` or `certFile` must be defined. + // + // It is mutually exclusive with `keyFile`. 
+ // + // +optional + KeySecret v1.SecretKeySelector `json:"keySecret,omitempty"` + // keyFile defines the path to the TLS private key file in the container for the web server. + // + // If defined, either `cert` or `certFile` must be defined. + // + // It is mutually exclusive with `keySecret`. + // + // +optional + KeyFile *string `json:"keyFile,omitempty"` + + // client_ca defines the Secret or ConfigMap containing the CA certificate for client certificate + // authentication to the server. + // + // It is mutually exclusive with `clientCAFile`. + // + // +optional + //nolint:kubeapilinter // The json tag doesn't meet the conventions to be compatible with Prometheus format. + ClientCA SecretOrConfigMap `json:"client_ca,omitempty"` + // clientCAFile defines the path to the CA certificate file for client certificate authentication to + // the server. + // + // It is mutually exclusive with `client_ca`. + // + // +optional + ClientCAFile *string `json:"clientCAFile,omitempty"` + // clientAuthType defines the server policy for client TLS authentication. + // + // For more detail on clientAuth options: + // https://golang.org/pkg/crypto/tls/#ClientAuthType + // + // +optional + ClientAuthType *string `json:"clientAuthType,omitempty"` + + // minVersion defines the minimum TLS version that is acceptable. + // + // +optional + MinVersion *string `json:"minVersion,omitempty"` + // maxVersion defines the Maximum TLS version that is acceptable. + // + // +optional + MaxVersion *string `json:"maxVersion,omitempty"` + + // cipherSuites defines the list of supported cipher suites for TLS versions up to TLS 1.2. + // + // If not defined, the Go default cipher suites are used. + // Available cipher suites are documented in the Go documentation: + // https://golang.org/pkg/crypto/tls/#pkg-constants + // + // +optional + CipherSuites []string `json:"cipherSuites,omitempty"` + + // preferServerCipherSuites defines whether the server selects the client's most preferred cipher + // suite, or the server's most preferred cipher suite. + // + // If true then the server's preference, as expressed in + // the order of elements in cipherSuites, is used. + // + // +optional + PreferServerCipherSuites *bool `json:"preferServerCipherSuites,omitempty"` // nolint:kubeapilinter + + // curvePreferences defines elliptic curves that will be used in an ECDHE handshake, in preference + // order. + // + // Available curves are documented in the Go documentation: + // https://golang.org/pkg/crypto/tls/#CurveID + // + // +optional + CurvePreferences []string `json:"curvePreferences,omitempty"` +} + +// Validate returns an error if one of the WebTLSConfig fields is invalid. +// A valid WebTLSConfig should have (Cert or CertFile) and (KeySecret or KeyFile) fields which are not +// zero values. 
+func (c *WebTLSConfig) Validate() error { + if c == nil { + return nil + } + + if c.ClientCA != (SecretOrConfigMap{}) { + if c.ClientCAFile != nil && *c.ClientCAFile != "" { + return errors.New("cannot specify both clientCAFile and clientCA") + } + + if err := c.ClientCA.Validate(); err != nil { + return fmt.Errorf("invalid client CA: %w", err) + } + } + + if c.Cert != (SecretOrConfigMap{}) { + if c.CertFile != nil && *c.CertFile != "" { + return errors.New("cannot specify both cert and certFile") + } + if err := c.Cert.Validate(); err != nil { + return fmt.Errorf("invalid TLS certificate: %w", err) + } + } + + if c.KeyFile != nil && *c.KeyFile != "" && c.KeySecret != (v1.SecretKeySelector{}) { + return errors.New("cannot specify both keyFile and keySecret") + } + + if (c.KeyFile == nil || *c.KeyFile == "") && c.KeySecret == (v1.SecretKeySelector{}) { + return errors.New("TLS private key must be defined") + } + + if (c.CertFile == nil || *c.CertFile == "") && c.Cert == (SecretOrConfigMap{}) { + return errors.New("TLS certificate must be defined") + } + + return nil +} + +// LabelName is a valid Prometheus label name. +// For Prometheus 3.x, a label name is valid if it contains UTF-8 characters. +// For Prometheus 2.x, a label name is only valid if it contains ASCII characters, letters, numbers, as well as underscores. +type LabelName string + +// Endpoint defines an endpoint serving Prometheus metrics to be scraped by +// Prometheus. +// +// +k8s:openapi-gen=true +type Endpoint struct { + // port defines the name of the Service port which this endpoint refers to. + // + // It takes precedence over `targetPort`. + // +optional + Port string `json:"port,omitempty"` + + // targetPort defines the name or number of the target port of the `Pod` object behind the + // Service. The port must be specified with the container's port property. + // + // +optional + TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` + + // path defines the HTTP path from which to scrape for metrics. + // + // If empty, Prometheus uses the default value (e.g. `/metrics`). + // +optional + Path string `json:"path,omitempty"` + + // scheme defines the HTTP scheme to use when scraping the metrics. + // + // +optional + Scheme *Scheme `json:"scheme,omitempty"` + + // params define optional HTTP URL parameters. + // +optional + //nolint:kubeapilinter + Params map[string][]string `json:"params,omitempty"` + + // interval at which Prometheus scrapes the metrics from the target. + // + // If empty, Prometheus uses the global scrape interval. + // +optional + Interval Duration `json:"interval,omitempty"` + + // scrapeTimeout defines the timeout after which Prometheus considers the scrape to be failed. + // + // If empty, Prometheus uses the global scrape timeout unless it is less + // than the target's scrape interval value in which the latter is used. + // The value cannot be greater than the scrape interval otherwise the operator will reject the resource. + // +optional + ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` + + // honorLabels defines when true the metric's labels when they collide + // with the target's labels. + // +optional + HonorLabels bool `json:"honorLabels,omitempty"` // nolint:kubeapilinter + + // honorTimestamps defines whether Prometheus preserves the timestamps + // when exposed by the target. 
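A hedged sketch of a WebTLSConfig that satisfies the Validate rules above (the Secret name and keys are assumptions): certificate and private key both come from one Secret, so cert and key are defined together.

package example

import (
	corev1 "k8s.io/api/core/v1"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// webTLSValidateSketch serves the web endpoint from the "web-tls" Secret.
func webTLSValidateSketch() error {
	cfg := monitoringv1.WebTLSConfig{
		Cert: monitoringv1.SecretOrConfigMap{
			Secret: &corev1.SecretKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{Name: "web-tls"},
				Key:                  "tls.crt",
			},
		},
		KeySecret: corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: "web-tls"},
			Key:                  "tls.key",
		},
	}
	return cfg.Validate() // nil
}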
+ // + // +optional + HonorTimestamps *bool `json:"honorTimestamps,omitempty"` // nolint:kubeapilinter + + // trackTimestampsStaleness defines whether Prometheus tracks staleness of + // the metrics that have an explicit timestamp present in scraped data. + // Has no effect if `honorTimestamps` is false. + // + // It requires Prometheus >= v2.48.0. + // + // +optional + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` // nolint:kubeapilinter + + // metricRelabelings defines the relabeling rules to apply to the + // samples before ingestion. + // + // +optional + MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"` + + // relabelings defines the relabeling rules to apply the target's + // metadata labels. + // + // The Operator automatically adds relabelings for a few standard Kubernetes fields. + // + // The original scrape job's name is available via the `__tmp_prometheus_job_name` label. + // + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + // + // +optional + RelabelConfigs []RelabelConfig `json:"relabelings,omitempty"` + + // filterRunning when true, the pods which are not running (e.g. either in Failed or + // Succeeded state) are dropped during the target discovery. + // + // If unset, the filtering is enabled. + // + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase + // + // +optional + FilterRunning *bool `json:"filterRunning,omitempty"` // nolint:kubeapilinter + + // bearerTokenFile defines the file to read bearer token for scraping the target. + // + // Deprecated: use `authorization` instead. + // +optional + BearerTokenFile string `json:"bearerTokenFile,omitempty"` + + HTTPConfigWithProxyAndTLSFiles `json:",inline"` +} + +type AttachMetadata struct { + // node when set to true, Prometheus attaches node metadata to the discovered + // targets. + // + // The Prometheus service account must have the `list` and `watch` + // permissions on the `Nodes` objects. + // + // +optional + Node *bool `json:"node,omitempty"` // nolint:kubeapilinter +} + +// OAuth2 configures OAuth2 settings. +// +// +k8s:openapi-gen=true +type OAuth2 struct { + // clientId defines a key of a Secret or ConfigMap containing the + // OAuth2 client's ID. + // +required + ClientID SecretOrConfigMap `json:"clientId"` + + // clientSecret defines a key of a Secret containing the OAuth2 + // client's secret. + // +required + ClientSecret v1.SecretKeySelector `json:"clientSecret"` + + // tokenUrl defines the URL to fetch the token from. + // + // +kubebuilder:validation:MinLength=1 + // +required + TokenURL string `json:"tokenUrl"` + + // scopes defines the OAuth2 scopes used for the token request. + // + // +optional. + Scopes []string `json:"scopes,omitempty"` + + // endpointParams configures the HTTP parameters to append to the token + // URL. + // + // +optional + //nolint:kubeapilinter + EndpointParams map[string]string `json:"endpointParams,omitempty"` + + // tlsConfig defines the TLS configuration to use when connecting to the OAuth2 server. + // It requires Prometheus >= v2.43.0. + // + // +optional + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` + + // Proxy configuration to use when connecting to the OAuth2 server. + // It requires Prometheus >= v2.43.0. 
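A minimal sketch of the Endpoint type above (port name, path and interval are assumptions): scraping a named service port over HTTPS every 30 seconds, using the Scheme constants defined later in this file.

package example

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// endpointSketch scrapes the "metrics" service port at /metrics over HTTPS.
func endpointSketch() monitoringv1.Endpoint {
	scheme := monitoringv1.SchemeHTTPS
	return monitoringv1.Endpoint{
		Port:     "metrics",
		Path:     "/metrics",
		Scheme:   &scheme,
		Interval: monitoringv1.Duration("30s"),
	}
}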
+ // + // +optional + ProxyConfig `json:",inline"` +} + +func (o *OAuth2) Validate() error { + if o == nil { + return nil + } + + if o.TokenURL == "" { + return errors.New("OAuth2 tokenURL must be specified") + } + + if o.ClientID == (SecretOrConfigMap{}) { + return errors.New("OAuth2 clientID must be specified") + } + + if err := o.ClientID.Validate(); err != nil { + return fmt.Errorf("invalid OAuth2 clientID: %w", err) + } + + if err := o.TLSConfig.Validate(); err != nil { + return fmt.Errorf("invalid OAuth2 tlsConfig: %w", err) + } + + return nil +} + +// BasicAuth configures HTTP Basic Authentication settings. +// +// +k8s:openapi-gen=true +type BasicAuth struct { + // username defines a key of a Secret containing the username for + // authentication. + // +optional + Username v1.SecretKeySelector `json:"username,omitempty"` + + // password defines a key of a Secret containing the password for + // authentication. + // +optional + Password v1.SecretKeySelector `json:"password,omitempty"` +} + +// SecretOrConfigMap allows to specify data as a Secret or ConfigMap. Fields are mutually exclusive. +type SecretOrConfigMap struct { + // secret defines the Secret containing data to use for the targets. + // +optional + Secret *v1.SecretKeySelector `json:"secret,omitempty"` + // configMap defines the ConfigMap containing data to use for the targets. + // +optional + ConfigMap *v1.ConfigMapKeySelector `json:"configMap,omitempty"` +} + +// Validate semantically validates the given SecretOrConfigMap. +func (c *SecretOrConfigMap) Validate() error { + if c == nil { + return nil + } + + if c.Secret != nil && c.ConfigMap != nil { + return fmt.Errorf("cannot specify both Secret and ConfigMap") + } + + return nil +} + +func (c *SecretOrConfigMap) String() string { + if c == nil { + return "" + } + + switch { + case c.Secret != nil: + return fmt.Sprintf("", c.Secret.LocalObjectReference.Name, c.Secret.Key) + case c.ConfigMap != nil: + return fmt.Sprintf("", c.ConfigMap.LocalObjectReference.Name, c.ConfigMap.Key) + } + + return "" +} + +// NamespaceSelector is a selector for selecting either all namespaces or a +// list of namespaces. +// If `any` is true, it takes precedence over `matchNames`. +// If `matchNames` is empty and `any` is false, it means that the objects are +// selected from the current namespace. +// +k8s:openapi-gen=true +type NamespaceSelector struct { + // any defines the boolean describing whether all namespaces are selected in contrast to a + // list restricting them. + // +optional + Any bool `json:"any,omitempty"` // nolint:kubeapilinter + // matchNames defines the list of namespace names to select from. + // +optional + MatchNames []string `json:"matchNames,omitempty"` + + // TODO(fabxc): this should embed metav1.LabelSelector eventually. + // Currently the selector is only used for namespaces which require more complex + // implementation to support label selections. +} + +// Argument as part of the AdditionalArgs list. +// +k8s:openapi-gen=true +type Argument struct { + // name of the argument, e.g. "scrape.discovery-reload-interval". + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + // value defines the argument value, e.g. 30s. Can be empty for name-only arguments (e.g. --storage.tsdb.no-lockfile) + // +optional + Value string `json:"value,omitempty"` +} + +// The valid options for Role. 
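For illustration of the mutual exclusivity enforced by SecretOrConfigMap.Validate above (object names are assumptions): referencing the same value from both a Secret and a ConfigMap is rejected.

package example

import (
	corev1 "k8s.io/api/core/v1"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// secretOrConfigMapSketch sets both selectors and therefore fails validation.
func secretOrConfigMapSketch() error {
	clientID := monitoringv1.SecretOrConfigMap{
		Secret: &corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: "oauth"},
			Key:                  "client-id",
		},
		ConfigMap: &corev1.ConfigMapKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: "oauth"},
			Key:                  "client-id",
		},
	}
	return clientID.Validate() // "cannot specify both Secret and ConfigMap"
}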
+const ( + RoleNode = "node" + RolePod = "pod" + RoleService = "service" + RoleEndpoint = "endpoints" + RoleEndpointSlice = "endpointslice" + RoleIngress = "ingress" +) + +// NativeHistogramConfig extends the native histogram configuration settings. +// +k8s:openapi-gen=true +type NativeHistogramConfig struct { + // scrapeNativeHistograms defines whether to enable scraping of native histograms. + // It requires Prometheus >= v3.8.0. + // + // +optional + ScrapeNativeHistograms *bool `json:"scrapeNativeHistograms,omitempty"` // nolint:kubeapilinter + + // scrapeClassicHistograms defines whether to scrape a classic histogram that is also exposed as a native histogram. + // It requires Prometheus >= v2.45.0. + // + // Notice: `scrapeClassicHistograms` corresponds to the `always_scrape_classic_histograms` field in the Prometheus configuration. + // + // +optional + ScrapeClassicHistograms *bool `json:"scrapeClassicHistograms,omitempty"` // nolint:kubeapilinter + + // nativeHistogramBucketLimit defines ff there are more than this many buckets in a native histogram, + // buckets will be merged to stay within the limit. + // It requires Prometheus >= v2.45.0. + // + // +optional + NativeHistogramBucketLimit *uint64 `json:"nativeHistogramBucketLimit,omitempty"` + + // nativeHistogramMinBucketFactor defines if the growth factor of one bucket to the next is smaller than this, + // buckets will be merged to increase the factor sufficiently. + // It requires Prometheus >= v2.50.0. + // + // +optional + NativeHistogramMinBucketFactor *resource.Quantity `json:"nativeHistogramMinBucketFactor,omitempty"` + + // convertClassicHistogramsToNHCB defines whether to convert all scraped classic histograms into a native histogram with custom buckets. + // It requires Prometheus >= v3.0.0. + // + // +optional + ConvertClassicHistogramsToNHCB *bool `json:"convertClassicHistogramsToNHCB,omitempty"` // nolint:kubeapilinter +} + +// +kubebuilder:validation:Enum=RelabelConfig;RoleSelector +type SelectorMechanism string + +const ( + SelectorMechanismRelabel SelectorMechanism = "RelabelConfig" + SelectorMechanismRole SelectorMechanism = "RoleSelector" +) + +// ConfigResourceStatus is the most recent observed status of the Configuration Resource (ServiceMonitor, PodMonitor, Probes, ScrapeConfig, PrometheusRule or AlertmanagerConfig). Read-only. +// More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type ConfigResourceStatus struct { + // bindings defines the list of workload resources (Prometheus, PrometheusAgent, ThanosRuler or Alertmanager) which select the configuration resource. + // +listType=map + // +listMapKey=group + // +listMapKey=resource + // +listMapKey=name + // +listMapKey=namespace + // +optional + Bindings []WorkloadBinding `json:"bindings,omitempty"` +} + +// WorkloadBinding is a link between a configuration resource and a workload resource. +// +k8s:openapi-gen=true +type WorkloadBinding struct { + // group defines the group of the referenced resource. + // +kubebuilder:validation:Enum=monitoring.coreos.com + // +required + Group string `json:"group"` + // resource defines the type of resource being referenced (e.g. Prometheus, PrometheusAgent, ThanosRuler or Alertmanager). + // +kubebuilder:validation:Enum=prometheuses;prometheusagents;thanosrulers;alertmanagers + // +required + Resource string `json:"resource"` + // name defines the name of the referenced object. 
+ // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + // namespace defines the namespace of the referenced object. + // +kubebuilder:validation:MinLength=1 + // +required + Namespace string `json:"namespace"` + // conditions defines the current state of the configuration resource when bound to the referenced Workload object. + // +listType=map + // +listMapKey=type + // +optional + Conditions []ConfigResourceCondition `json:"conditions,omitempty"` +} + +// ConfigResourceCondition describes the status of configuration resources linked to Prometheus, PrometheusAgent, Alertmanager or ThanosRuler. +// +k8s:deepcopy-gen=true +type ConfigResourceCondition struct { + // type of the condition being reported. + // Currently, only "Accepted" is supported. + // +kubebuilder:validation:Enum=Accepted + // +required + Type ConditionType `json:"type"` + // status of the condition. + // +required + Status ConditionStatus `json:"status"` + // lastTransitionTime defines the time of the last update to the current status property. + // +required + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + // reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // message defines the human-readable message indicating details for the condition's last transition. + // +optional + Message string `json:"message,omitempty"` + // observedGeneration defines the .metadata.generation that the + // condition was set based upon. For instance, if `.metadata.generation` is + // currently 12, but the `.status.conditions[].observedGeneration` is 9, the + // condition is out of date with respect to the current state of the object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// Supported values are `HTTP` and `HTTPS`. You can also rewrite the +// `__scheme__` label via relabeling configuration. +// +// If empty, the value defaults to `HTTP`. +// +// +kubebuilder:validation:Enum=http;https;HTTP;HTTPS +type Scheme string + +func (s *Scheme) String() string { + if s == nil { + return "" + } + + return strings.ToLower(string(*s)) +} + +const ( + SchemeHTTP Scheme = "HTTP" + SchemeHTTPS Scheme = "HTTPS" +) + +// +kubebuilder:validation:Enum=OrderedReady;Parallel +type PodManagementPolicyType string + +const ( + // OrderedReadyPodManagement will create pods in strictly increasing order on + // scale up and strictly decreasing order on scale down, progressing only when + // the previous pod is ready or terminated. At most one pod will be changed + // at any time. + OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady" + // ParallelPodManagement will create and delete pods as soon as the stateful set + // replica count is changed, and will not wait for pods to be ready or complete + // termination. + ParallelPodManagement PodManagementPolicyType = "Parallel" +) + +// StatefulSetUpdateStrategy indicates the strategy used when updating the +// StatefulSet. It includes any additional parameters necessary to perform the +// update for the indicated strategy. +// +// +kubebuilder:validation:XValidation:rule="!(self.type != 'RollingUpdate' && has(self.rollingUpdate))",message="rollingUpdate requires type to be RollingUpdate" +type StatefulSetUpdateStrategy struct { + // type indicates the type of the StatefulSetUpdateStrategy. + // + // Default is RollingUpdate. 
+    //
+    // +required
+    Type StatefulSetUpdateStrategyType `json:"type"`
+
+    // rollingUpdate is used to communicate parameters when type is RollingUpdate.
+    //
+    // +optional
+    RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty"`
+}
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameters for the RollingUpdate strategy.
+type RollingUpdateStatefulSetStrategy struct {
+    // maxUnavailable is the maximum number of pods that can be unavailable
+    // during the update. The value can be an absolute number (ex: 5) or a
+    // percentage of desired pods (ex: 10%). Absolute number is calculated from
+    // percentage by rounding up. This can not be 0. Defaults to 1. This field
+    // is alpha-level and is only honored by servers that enable the
+    // MaxUnavailableStatefulSet feature. The field applies to all pods in the
+    // range 0 to Replicas-1. That means if there is any unavailable pod in
+    // the range 0 to Replicas-1, it will be counted towards MaxUnavailable.
+    //
+    // +kubebuilder:validation:XIntOrString
+    // +optional
+    MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"varint,2,opt,name=maxUnavailable"`
+}
+
+// StatefulSetUpdateStrategyType is a string enumeration type that enumerates
+// all possible update strategies for the StatefulSet pods.
+//
+// +kubebuilder:validation:Enum=OnDelete;RollingUpdate
+type StatefulSetUpdateStrategyType string
+
+const (
+    // RollingUpdateStatefulSetStrategyType indicates that update will be
+    // applied to all Pods in the StatefulSet with respect to the StatefulSet
+    // ordering constraints. When a scale operation is performed with this
+    // strategy, new Pods will be created from the specification version indicated
+    // by the StatefulSet's updateRevision.
+    RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate"
+
+    // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
+    // tracking and ordered rolling restarts are disabled. Pods are recreated
+    // from the StatefulSetSpec when they are manually deleted. When a scale
+    // operation is performed with this strategy, new Pods will be created from
+    // the specification version indicated by the StatefulSet's
+    // currentRevision.
+    OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete"
+)
+
+type TracingConfig struct {
+    // clientType defines the client used to export the traces. Supported values are `HTTP` and `GRPC`.
+    // +kubebuilder:validation:Enum=http;grpc;HTTP;GRPC
+    // +optional
+    ClientType *string `json:"clientType,omitempty"`
+
+    // endpoint to send the traces to. Should be provided in the format `<host>:<port>`.
+    // +kubebuilder:validation:MinLength:=1
+    // +required
+    Endpoint string `json:"endpoint"`
+
+    // samplingFraction defines the probability a given trace will be sampled. Must be a float from 0 through 1.
+    // +optional
+    SamplingFraction *resource.Quantity `json:"samplingFraction,omitempty"`
+
+    // insecure defines whether to use an insecure connection; if disabled, the client will use a secure connection.
+    // +optional
+    Insecure *bool `json:"insecure,omitempty"` // nolint:kubeapilinter
+
+    // headers defines the key-value pairs to be used as headers associated with gRPC or HTTP requests.
+    // +optional
+    Headers map[string]string `json:"headers"`
+
+    // compression key for supported compression types. The only supported value is `Gzip`.
+    // +kubebuilder:validation:Enum=gzip;Gzip
+    // +optional
+    Compression *string `json:"compression,omitempty"`
+
+    // timeout defines the maximum time the exporter will wait for each batch export.
+    // +optional
+    Timeout *Duration `json:"timeout,omitempty"`
+
+    // tlsConfig to use when sending traces.
+    // +optional
+    TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
+}
+
+// Validate semantically validates the given TracingConfig.
+func (tc *TracingConfig) Validate() error {
+    if tc == nil {
+        return nil
+    }
+
+    if err := tc.TLSConfig.Validate(); err != nil {
+        return err
+    }
+
+    if tc.SamplingFraction != nil {
+        min, _ := resource.ParseQuantity("0")
+        max, _ := resource.ParseQuantity("1")
+
+        if tc.SamplingFraction.Cmp(min) < 0 || tc.SamplingFraction.Cmp(max) > 0 {
+            return fmt.Errorf("`samplingFraction` must be between 0 and 1")
+        }
+    }
+
+    return nil
+}
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..73314958dc
--- /dev/null
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go
@@ -0,0 +1,4414 @@
+//go:build !ignore_autogenerated
+
+// Copyright The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1
+
+import (
+    appsv1 "k8s.io/api/apps/v1"
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerConfig) DeepCopyInto(out *APIServerConfig) {
+    *out = *in
+    if in.BasicAuth != nil {
+        in, out := &in.BasicAuth, &out.BasicAuth
+        *out = new(BasicAuth)
+        (*in).DeepCopyInto(*out)
+    }
+    if in.TLSConfig != nil {
+        in, out := &in.TLSConfig, &out.TLSConfig
+        *out = new(TLSConfig)
+        (*in).DeepCopyInto(*out)
+    }
+    if in.Authorization != nil {
+        in, out := &in.Authorization, &out.Authorization
+        *out = new(Authorization)
+        (*in).DeepCopyInto(*out)
+    }
+    in.ProxyConfig.DeepCopyInto(&out.ProxyConfig)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerConfig.
+func (in *APIServerConfig) DeepCopy() *APIServerConfig {
+    if in == nil {
+        return nil
+    }
+    out := new(APIServerConfig)
+    in.DeepCopyInto(out)
+    return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertingSpec) DeepCopyInto(out *AlertingSpec) { + *out = *in + if in.Alertmanagers != nil { + in, out := &in.Alertmanagers, &out.Alertmanagers + *out = make([]AlertmanagerEndpoints, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingSpec. +func (in *AlertingSpec) DeepCopy() *AlertingSpec { + if in == nil { + return nil + } + out := new(AlertingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Alertmanager) DeepCopyInto(out *Alertmanager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alertmanager. +func (in *Alertmanager) DeepCopy() *Alertmanager { + if in == nil { + return nil + } + out := new(Alertmanager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerConfigMatcherStrategy) DeepCopyInto(out *AlertmanagerConfigMatcherStrategy) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerConfigMatcherStrategy. +func (in *AlertmanagerConfigMatcherStrategy) DeepCopy() *AlertmanagerConfigMatcherStrategy { + if in == nil { + return nil + } + out := new(AlertmanagerConfigMatcherStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerConfiguration) DeepCopyInto(out *AlertmanagerConfiguration) { + *out = *in + if in.Global != nil { + in, out := &in.Global, &out.Global + *out = new(AlertmanagerGlobalConfig) + (*in).DeepCopyInto(*out) + } + if in.Templates != nil { + in, out := &in.Templates, &out.Templates + *out = make([]SecretOrConfigMap, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerConfiguration. +func (in *AlertmanagerConfiguration) DeepCopy() *AlertmanagerConfiguration { + if in == nil { + return nil + } + out := new(AlertmanagerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlertmanagerEndpoints) DeepCopyInto(out *AlertmanagerEndpoints) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + out.Port = in.Port + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(Scheme) + **out = **in + } + if in.PathPrefix != nil { + in, out := &in.PathPrefix, &out.PathPrefix + *out = new(string) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.Sigv4 != nil { + in, out := &in.Sigv4, &out.Sigv4 + *out = new(Sigv4) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.APIVersion != nil { + in, out := &in.APIVersion, &out.APIVersion + *out = new(AlertmanagerAPIVersion) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(Duration) + **out = **in + } + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AlertRelabelConfigs != nil { + in, out := &in.AlertRelabelConfigs, &out.AlertRelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerEndpoints. +func (in *AlertmanagerEndpoints) DeepCopy() *AlertmanagerEndpoints { + if in == nil { + return nil + } + out := new(AlertmanagerEndpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlertmanagerGlobalConfig) DeepCopyInto(out *AlertmanagerGlobalConfig) { + *out = *in + if in.SMTPConfig != nil { + in, out := &in.SMTPConfig, &out.SMTPConfig + *out = new(GlobalSMTPConfig) + (*in).DeepCopyInto(*out) + } + if in.HTTPConfigWithProxy != nil { + in, out := &in.HTTPConfigWithProxy, &out.HTTPConfigWithProxy + *out = new(HTTPConfigWithProxy) + (*in).DeepCopyInto(*out) + } + if in.SlackAPIURL != nil { + in, out := &in.SlackAPIURL, &out.SlackAPIURL + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.OpsGenieAPIURL != nil { + in, out := &in.OpsGenieAPIURL, &out.OpsGenieAPIURL + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.OpsGenieAPIKey != nil { + in, out := &in.OpsGenieAPIKey, &out.OpsGenieAPIKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.PagerdutyURL != nil { + in, out := &in.PagerdutyURL, &out.PagerdutyURL + *out = new(URL) + **out = **in + } + if in.TelegramConfig != nil { + in, out := &in.TelegramConfig, &out.TelegramConfig + *out = new(GlobalTelegramConfig) + (*in).DeepCopyInto(*out) + } + if in.JiraConfig != nil { + in, out := &in.JiraConfig, &out.JiraConfig + *out = new(GlobalJiraConfig) + (*in).DeepCopyInto(*out) + } + if in.VictorOpsConfig != nil { + in, out := &in.VictorOpsConfig, &out.VictorOpsConfig + *out = new(GlobalVictorOpsConfig) + (*in).DeepCopyInto(*out) + } + if in.RocketChatConfig != nil { + in, out := &in.RocketChatConfig, &out.RocketChatConfig + *out = new(GlobalRocketChatConfig) + (*in).DeepCopyInto(*out) + } + if in.WebexConfig != nil { + in, out := &in.WebexConfig, &out.WebexConfig + *out = new(GlobalWebexConfig) + (*in).DeepCopyInto(*out) + } + if in.WeChatConfig != nil { + in, out := &in.WeChatConfig, &out.WeChatConfig + *out = new(GlobalWeChatConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerGlobalConfig. +func (in *AlertmanagerGlobalConfig) DeepCopy() *AlertmanagerGlobalConfig { + if in == nil { + return nil + } + out := new(AlertmanagerGlobalConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerLimitsSpec) DeepCopyInto(out *AlertmanagerLimitsSpec) { + *out = *in + if in.MaxSilences != nil { + in, out := &in.MaxSilences, &out.MaxSilences + *out = new(int32) + **out = **in + } + if in.MaxPerSilenceBytes != nil { + in, out := &in.MaxPerSilenceBytes, &out.MaxPerSilenceBytes + *out = new(ByteSize) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerLimitsSpec. +func (in *AlertmanagerLimitsSpec) DeepCopy() *AlertmanagerLimitsSpec { + if in == nil { + return nil + } + out := new(AlertmanagerLimitsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerList) DeepCopyInto(out *AlertmanagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Alertmanager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerList. 
+func (in *AlertmanagerList) DeepCopy() *AlertmanagerList { + if in == nil { + return nil + } + out := new(AlertmanagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerSpec) DeepCopyInto(out *AlertmanagerSpec) { + *out = *in + if in.PodMetadata != nil { + in, out := &in.PodMetadata, &out.PodMetadata + *out = new(EmbeddedObjectMetadata) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageSpec) + (*in).DeepCopyInto(*out) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]corev1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PersistentVolumeClaimRetentionPolicy != nil { + in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy + *out = new(appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(DNSPolicy) + **out = **in + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.PodManagementPolicy != nil { + in, out := &in.PodManagementPolicy, &out.PodManagementPolicy + *out = new(PodManagementPolicyType) + **out = **in + } + if in.UpdateStrategy != nil { + in, out := &in.UpdateStrategy, &out.UpdateStrategy + *out = 
new(StatefulSetUpdateStrategy) + (*in).DeepCopyInto(*out) + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalPeers != nil { + in, out := &in.AdditionalPeers, &out.AdditionalPeers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ClusterLabel != nil { + in, out := &in.ClusterLabel, &out.ClusterLabel + *out = new(string) + **out = **in + } + if in.AlertmanagerConfigSelector != nil { + in, out := &in.AlertmanagerConfigSelector, &out.AlertmanagerConfigSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.AlertmanagerConfigNamespaceSelector != nil { + in, out := &in.AlertmanagerConfigNamespaceSelector, &out.AlertmanagerConfigNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + out.AlertmanagerConfigMatcherStrategy = in.AlertmanagerConfigMatcherStrategy + if in.MinReadySeconds != nil { + in, out := &in.MinReadySeconds, &out.MinReadySeconds + *out = new(int32) + **out = **in + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]HostAlias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Web != nil { + in, out := &in.Web, &out.Web + *out = new(AlertmanagerWebSpec) + (*in).DeepCopyInto(*out) + } + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = new(AlertmanagerLimitsSpec) + (*in).DeepCopyInto(*out) + } + if in.ClusterTLS != nil { + in, out := &in.ClusterTLS, &out.ClusterTLS + *out = new(ClusterTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.AlertmanagerConfiguration != nil { + in, out := &in.AlertmanagerConfiguration, &out.AlertmanagerConfiguration + *out = new(AlertmanagerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + if in.EnableFeatures != nil { + in, out := &in.EnableFeatures, &out.EnableFeatures + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AdditionalArgs != nil { + in, out := &in.AdditionalArgs, &out.AdditionalArgs + *out = make([]Argument, len(*in)) + copy(*out, *in) + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.HostUsers != nil { + in, out := &in.HostUsers, &out.HostUsers + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerSpec. +func (in *AlertmanagerSpec) DeepCopy() *AlertmanagerSpec { + if in == nil { + return nil + } + out := new(AlertmanagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlertmanagerStatus) DeepCopyInto(out *AlertmanagerStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerStatus. +func (in *AlertmanagerStatus) DeepCopy() *AlertmanagerStatus { + if in == nil { + return nil + } + out := new(AlertmanagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerWebSpec) DeepCopyInto(out *AlertmanagerWebSpec) { + *out = *in + in.WebConfigFileFields.DeepCopyInto(&out.WebConfigFileFields) + if in.GetConcurrency != nil { + in, out := &in.GetConcurrency, &out.GetConcurrency + *out = new(uint32) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(uint32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerWebSpec. +func (in *AlertmanagerWebSpec) DeepCopy() *AlertmanagerWebSpec { + if in == nil { + return nil + } + out := new(AlertmanagerWebSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArbitraryFSAccessThroughSMsConfig) DeepCopyInto(out *ArbitraryFSAccessThroughSMsConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArbitraryFSAccessThroughSMsConfig. +func (in *ArbitraryFSAccessThroughSMsConfig) DeepCopy() *ArbitraryFSAccessThroughSMsConfig { + if in == nil { + return nil + } + out := new(ArbitraryFSAccessThroughSMsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Argument) DeepCopyInto(out *Argument) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Argument. +func (in *Argument) DeepCopy() *Argument { + if in == nil { + return nil + } + out := new(Argument) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachMetadata) DeepCopyInto(out *AttachMetadata) { + *out = *in + if in.Node != nil { + in, out := &in.Node, &out.Node + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachMetadata. +func (in *AttachMetadata) DeepCopy() *AttachMetadata { + if in == nil { + return nil + } + out := new(AttachMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authorization) DeepCopyInto(out *Authorization) { + *out = *in + in.SafeAuthorization.DeepCopyInto(&out.SafeAuthorization) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authorization. +func (in *Authorization) DeepCopy() *Authorization { + if in == nil { + return nil + } + out := new(Authorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureAD) DeepCopyInto(out *AzureAD) { + *out = *in + if in.Cloud != nil { + in, out := &in.Cloud, &out.Cloud + *out = new(string) + **out = **in + } + if in.ManagedIdentity != nil { + in, out := &in.ManagedIdentity, &out.ManagedIdentity + *out = new(ManagedIdentity) + (*in).DeepCopyInto(*out) + } + if in.OAuth != nil { + in, out := &in.OAuth, &out.OAuth + *out = new(AzureOAuth) + (*in).DeepCopyInto(*out) + } + if in.SDK != nil { + in, out := &in.SDK, &out.SDK + *out = new(AzureSDK) + (*in).DeepCopyInto(*out) + } + if in.WorkloadIdentity != nil { + in, out := &in.WorkloadIdentity, &out.WorkloadIdentity + *out = new(AzureWorkloadIdentity) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureAD. +func (in *AzureAD) DeepCopy() *AzureAD { + if in == nil { + return nil + } + out := new(AzureAD) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureOAuth) DeepCopyInto(out *AzureOAuth) { + *out = *in + in.ClientSecret.DeepCopyInto(&out.ClientSecret) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureOAuth. +func (in *AzureOAuth) DeepCopy() *AzureOAuth { + if in == nil { + return nil + } + out := new(AzureOAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureSDK) DeepCopyInto(out *AzureSDK) { + *out = *in + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSDK. +func (in *AzureSDK) DeepCopy() *AzureSDK { + if in == nil { + return nil + } + out := new(AzureSDK) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureWorkloadIdentity) DeepCopyInto(out *AzureWorkloadIdentity) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureWorkloadIdentity. +func (in *AzureWorkloadIdentity) DeepCopy() *AzureWorkloadIdentity { + if in == nil { + return nil + } + out := new(AzureWorkloadIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuth) DeepCopyInto(out *BasicAuth) { + *out = *in + in.Username.DeepCopyInto(&out.Username) + in.Password.DeepCopyInto(&out.Password) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuth. +func (in *BasicAuth) DeepCopy() *BasicAuth { + if in == nil { + return nil + } + out := new(BasicAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTLSConfig) DeepCopyInto(out *ClusterTLSConfig) { + *out = *in + in.ServerTLS.DeepCopyInto(&out.ServerTLS) + in.ClientTLS.DeepCopyInto(&out.ClientTLS) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTLSConfig. 
+func (in *ClusterTLSConfig) DeepCopy() *ClusterTLSConfig { + if in == nil { + return nil + } + out := new(ClusterTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) { + *out = *in + if in.PodMetadata != nil { + in, out := &in.PodMetadata, &out.PodMetadata + *out = new(EmbeddedObjectMetadata) + (*in).DeepCopyInto(*out) + } + if in.ServiceMonitorSelector != nil { + in, out := &in.ServiceMonitorSelector, &out.ServiceMonitorSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ServiceMonitorNamespaceSelector != nil { + in, out := &in.ServiceMonitorNamespaceSelector, &out.ServiceMonitorNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.PodMonitorSelector != nil { + in, out := &in.PodMonitorSelector, &out.PodMonitorSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.PodMonitorNamespaceSelector != nil { + in, out := &in.PodMonitorNamespaceSelector, &out.PodMonitorNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ProbeSelector != nil { + in, out := &in.ProbeSelector, &out.ProbeSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ProbeNamespaceSelector != nil { + in, out := &in.ProbeNamespaceSelector, &out.ProbeNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ScrapeConfigSelector != nil { + in, out := &in.ScrapeConfigSelector, &out.ScrapeConfigSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ScrapeConfigNamespaceSelector != nil { + in, out := &in.ScrapeConfigNamespaceSelector, &out.ScrapeConfigNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Shards != nil { + in, out := &in.Shards, &out.Shards + *out = new(int32) + **out = **in + } + if in.ReplicaExternalLabelName != nil { + in, out := &in.ReplicaExternalLabelName, &out.ReplicaExternalLabelName + *out = new(string) + **out = **in + } + if in.PrometheusExternalLabelName != nil { + in, out := &in.PrometheusExternalLabelName, &out.PrometheusExternalLabelName + *out = new(string) + **out = **in + } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]ScrapeProtocol, len(*in)) + copy(*out, *in) + } + if in.ExternalLabels != nil { + in, out := &in.ExternalLabels, &out.ExternalLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EnableOTLPReceiver != nil { + in, out := &in.EnableOTLPReceiver, &out.EnableOTLPReceiver + *out = new(bool) + **out = **in + } + if in.RemoteWriteReceiverMessageVersions != nil { + in, out := &in.RemoteWriteReceiverMessageVersions, &out.RemoteWriteReceiverMessageVersions + *out = make([]RemoteWriteMessageVersion, len(*in)) + copy(*out, *in) + } + if in.EnableFeatures != nil { + in, out := &in.EnableFeatures, &out.EnableFeatures + *out = make([]EnableFeature, len(*in)) + 
copy(*out, *in) + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageSpec) + (*in).DeepCopyInto(*out) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]corev1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PersistentVolumeClaimRetentionPolicy != nil { + in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy + *out = new(appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy) + **out = **in + } + if in.Web != nil { + in, out := &in.Web, &out.Web + *out = new(PrometheusWebSpec) + (*in).DeepCopyInto(*out) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RemoteWrite != nil { + in, out := &in.RemoteWrite, &out.RemoteWrite + *out = make([]RemoteWriteSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OTLP != nil { + in, out := &in.OTLP, &out.OTLP + *out = new(OTLPConfig) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(DNSPolicy) + **out = **in + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + if in.PodManagementPolicy != nil { + in, out := &in.PodManagementPolicy, &out.PodManagementPolicy + *out = new(PodManagementPolicyType) + **out = **in + } + if in.UpdateStrategy != nil { + in, out := &in.UpdateStrategy, &out.UpdateStrategy + *out = new(StatefulSetUpdateStrategy) + (*in).DeepCopyInto(*out) + } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalScrapeConfigs != nil { + in, out := &in.AdditionalScrapeConfigs, &out.AdditionalScrapeConfigs + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.APIServerConfig != nil { + in, out := &in.APIServerConfig, &out.APIServerConfig + *out = new(APIServerConfig) + (*in).DeepCopyInto(*out) + } + out.ArbitraryFSAccessThroughSMs = in.ArbitraryFSAccessThroughSMs + if in.EnforcedSampleLimit != nil { + in, out := &in.EnforcedSampleLimit, &out.EnforcedSampleLimit + *out = new(uint64) + **out = **in + } + if in.EnforcedTargetLimit != nil { + in, out := &in.EnforcedTargetLimit, &out.EnforcedTargetLimit + *out = new(uint64) + **out = **in + } + if in.EnforcedLabelLimit != nil { + in, out := &in.EnforcedLabelLimit, &out.EnforcedLabelLimit + *out = new(uint64) + **out = **in + } + if in.EnforcedLabelNameLengthLimit != nil { + in, out := &in.EnforcedLabelNameLengthLimit, &out.EnforcedLabelNameLengthLimit + *out = new(uint64) + **out = **in + } + if in.EnforcedLabelValueLengthLimit != nil { + in, out := &in.EnforcedLabelValueLengthLimit, &out.EnforcedLabelValueLengthLimit + *out = new(uint64) + **out = **in + } + if in.EnforcedKeepDroppedTargets != nil { + in, out := &in.EnforcedKeepDroppedTargets, &out.EnforcedKeepDroppedTargets + *out = new(uint64) + **out = **in + } + if in.NameValidationScheme != nil { + in, out := &in.NameValidationScheme, &out.NameValidationScheme + *out = new(NameValidationSchemeOptions) + **out = **in + } + if in.NameEscapingScheme != nil { + in, out := &in.NameEscapingScheme, &out.NameEscapingScheme + *out = new(NameEscapingSchemeOptions) + **out = **in + } + if in.ConvertClassicHistogramsToNHCB != nil { + in, out := &in.ConvertClassicHistogramsToNHCB, &out.ConvertClassicHistogramsToNHCB + *out = new(bool) + **out = **in + } + if in.ScrapeNativeHistograms != nil { + in, out := &in.ScrapeNativeHistograms, &out.ScrapeNativeHistograms + *out = new(bool) + **out = **in + } + if in.ScrapeClassicHistograms != nil { + in, out := &in.ScrapeClassicHistograms, &out.ScrapeClassicHistograms + *out = new(bool) + **out = **in + } + if in.MinReadySeconds != nil { + in, out := &in.MinReadySeconds, &out.MinReadySeconds + *out = new(int32) + **out = **in + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]HostAlias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalArgs != nil { + in, out := &in.AdditionalArgs, &out.AdditionalArgs + *out = make([]Argument, len(*in)) + copy(*out, *in) + } + if in.WALCompression != nil { + in, out := &in.WALCompression, &out.WALCompression + *out = new(bool) + **out = **in + } + if in.ExcludedFromEnforcement != nil { + in, out := &in.ExcludedFromEnforcement, &out.ExcludedFromEnforcement + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.PodTargetLabels != nil { + in, out := &in.PodTargetLabels, &out.PodTargetLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TracingConfig != nil { + in, out := &in.TracingConfig, &out.TracingConfig + *out = new(TracingConfig) + (*in).DeepCopyInto(*out) + } + if in.BodySizeLimit != nil { + in, out := &in.BodySizeLimit, &out.BodySizeLimit + *out = new(ByteSize) + **out = **in + } + if in.SampleLimit != nil { + in, out := &in.SampleLimit, &out.SampleLimit + *out = new(uint64) + **out = **in + } + if in.TargetLimit != nil { + in, out := &in.TargetLimit, &out.TargetLimit + *out = new(uint64) + **out = **in + } + if in.LabelLimit 
!= nil { + in, out := &in.LabelLimit, &out.LabelLimit + *out = new(uint64) + **out = **in + } + if in.LabelNameLengthLimit != nil { + in, out := &in.LabelNameLengthLimit, &out.LabelNameLengthLimit + *out = new(uint64) + **out = **in + } + if in.LabelValueLengthLimit != nil { + in, out := &in.LabelValueLengthLimit, &out.LabelValueLengthLimit + *out = new(uint64) + **out = **in + } + if in.KeepDroppedTargets != nil { + in, out := &in.KeepDroppedTargets, &out.KeepDroppedTargets + *out = new(uint64) + **out = **in + } + if in.ReloadStrategy != nil { + in, out := &in.ReloadStrategy, &out.ReloadStrategy + *out = new(ReloadStrategyType) + **out = **in + } + if in.MaximumStartupDurationSeconds != nil { + in, out := &in.MaximumStartupDurationSeconds, &out.MaximumStartupDurationSeconds + *out = new(int32) + **out = **in + } + if in.ScrapeClasses != nil { + in, out := &in.ScrapeClasses, &out.ScrapeClasses + *out = make([]ScrapeClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceDiscoveryRole != nil { + in, out := &in.ServiceDiscoveryRole, &out.ServiceDiscoveryRole + *out = new(ServiceDiscoveryRole) + **out = **in + } + if in.TSDB != nil { + in, out := &in.TSDB, &out.TSDB + *out = new(TSDBSpec) + (*in).DeepCopyInto(*out) + } + if in.ScrapeFailureLogFile != nil { + in, out := &in.ScrapeFailureLogFile, &out.ScrapeFailureLogFile + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(RuntimeConfig) + (*in).DeepCopyInto(*out) + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.HostUsers != nil { + in, out := &in.HostUsers, &out.HostUsers + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonPrometheusFields. +func (in *CommonPrometheusFields) DeepCopy() *CommonPrometheusFields { + if in == nil { + return nil + } + out := new(CommonPrometheusFields) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigResourceCondition) DeepCopyInto(out *ConfigResourceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigResourceCondition. +func (in *ConfigResourceCondition) DeepCopy() *ConfigResourceCondition { + if in == nil { + return nil + } + out := new(ConfigResourceCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigResourceStatus) DeepCopyInto(out *ConfigResourceStatus) { + *out = *in + if in.Bindings != nil { + in, out := &in.Bindings, &out.Bindings + *out = make([]WorkloadBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigResourceStatus. +func (in *ConfigResourceStatus) DeepCopy() *ConfigResourceStatus { + if in == nil { + return nil + } + out := new(ConfigResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoreV1TopologySpreadConstraint) DeepCopyInto(out *CoreV1TopologySpreadConstraint) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.MinDomains != nil { + in, out := &in.MinDomains, &out.MinDomains + *out = new(int32) + **out = **in + } + if in.NodeAffinityPolicy != nil { + in, out := &in.NodeAffinityPolicy, &out.NodeAffinityPolicy + *out = new(corev1.NodeInclusionPolicy) + **out = **in + } + if in.NodeTaintsPolicy != nil { + in, out := &in.NodeTaintsPolicy, &out.NodeTaintsPolicy + *out = new(corev1.NodeInclusionPolicy) + **out = **in + } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreV1TopologySpreadConstraint. +func (in *CoreV1TopologySpreadConstraint) DeepCopy() *CoreV1TopologySpreadConstraint { + if in == nil { + return nil + } + out := new(CoreV1TopologySpreadConstraint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedObjectMetadata) DeepCopyInto(out *EmbeddedObjectMetadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedObjectMetadata. +func (in *EmbeddedObjectMetadata) DeepCopy() *EmbeddedObjectMetadata { + if in == nil { + return nil + } + out := new(EmbeddedObjectMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedPersistentVolumeClaim) DeepCopyInto(out *EmbeddedPersistentVolumeClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.EmbeddedObjectMetadata.DeepCopyInto(&out.EmbeddedObjectMetadata) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedPersistentVolumeClaim. +func (in *EmbeddedPersistentVolumeClaim) DeepCopy() *EmbeddedPersistentVolumeClaim { + if in == nil { + return nil + } + out := new(EmbeddedPersistentVolumeClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(intstr.IntOrString) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(Scheme) + **out = **in + } + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.HonorTimestamps != nil { + in, out := &in.HonorTimestamps, &out.HonorTimestamps + *out = new(bool) + **out = **in + } + if in.TrackTimestampsStaleness != nil { + in, out := &in.TrackTimestampsStaleness, &out.TrackTimestampsStaleness + *out = new(bool) + **out = **in + } + if in.MetricRelabelConfigs != nil { + in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FilterRunning != nil { + in, out := &in.FilterRunning, &out.FilterRunning + *out = new(bool) + **out = **in + } + in.HTTPConfigWithProxyAndTLSFiles.DeepCopyInto(&out.HTTPConfigWithProxyAndTLSFiles) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Exemplars) DeepCopyInto(out *Exemplars) { + *out = *in + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Exemplars. +func (in *Exemplars) DeepCopy() *Exemplars { + if in == nil { + return nil + } + out := new(Exemplars) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalJiraConfig) DeepCopyInto(out *GlobalJiraConfig) { + *out = *in + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalJiraConfig. +func (in *GlobalJiraConfig) DeepCopy() *GlobalJiraConfig { + if in == nil { + return nil + } + out := new(GlobalJiraConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalRocketChatConfig) DeepCopyInto(out *GlobalRocketChatConfig) { + *out = *in + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.TokenID != nil { + in, out := &in.TokenID, &out.TokenID + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalRocketChatConfig. +func (in *GlobalRocketChatConfig) DeepCopy() *GlobalRocketChatConfig { + if in == nil { + return nil + } + out := new(GlobalRocketChatConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalSMTPConfig) DeepCopyInto(out *GlobalSMTPConfig) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(string) + **out = **in + } + if in.SmartHost != nil { + in, out := &in.SmartHost, &out.SmartHost + *out = new(HostPort) + **out = **in + } + if in.Hello != nil { + in, out := &in.Hello, &out.Hello + *out = new(string) + **out = **in + } + if in.AuthUsername != nil { + in, out := &in.AuthUsername, &out.AuthUsername + *out = new(string) + **out = **in + } + if in.AuthPassword != nil { + in, out := &in.AuthPassword, &out.AuthPassword + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.AuthIdentity != nil { + in, out := &in.AuthIdentity, &out.AuthIdentity + *out = new(string) + **out = **in + } + if in.AuthSecret != nil { + in, out := &in.AuthSecret, &out.AuthSecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.RequireTLS != nil { + in, out := &in.RequireTLS, &out.RequireTLS + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(SafeTLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalSMTPConfig. +func (in *GlobalSMTPConfig) DeepCopy() *GlobalSMTPConfig { + if in == nil { + return nil + } + out := new(GlobalSMTPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalTelegramConfig) DeepCopyInto(out *GlobalTelegramConfig) { + *out = *in + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalTelegramConfig. +func (in *GlobalTelegramConfig) DeepCopy() *GlobalTelegramConfig { + if in == nil { + return nil + } + out := new(GlobalTelegramConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalVictorOpsConfig) DeepCopyInto(out *GlobalVictorOpsConfig) { + *out = *in + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.APIKey != nil { + in, out := &in.APIKey, &out.APIKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalVictorOpsConfig. 
+func (in *GlobalVictorOpsConfig) DeepCopy() *GlobalVictorOpsConfig { + if in == nil { + return nil + } + out := new(GlobalVictorOpsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalWeChatConfig) DeepCopyInto(out *GlobalWeChatConfig) { + *out = *in + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.APISecret != nil { + in, out := &in.APISecret, &out.APISecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.APICorpID != nil { + in, out := &in.APICorpID, &out.APICorpID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalWeChatConfig. +func (in *GlobalWeChatConfig) DeepCopy() *GlobalWeChatConfig { + if in == nil { + return nil + } + out := new(GlobalWeChatConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalWebexConfig) DeepCopyInto(out *GlobalWebexConfig) { + *out = *in + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalWebexConfig. +func (in *GlobalWebexConfig) DeepCopy() *GlobalWebexConfig { + if in == nil { + return nil + } + out := new(GlobalWebexConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfig) DeepCopyInto(out *HTTPConfig) { + *out = *in + in.HTTPConfigWithoutTLS.DeepCopyInto(&out.HTTPConfigWithoutTLS) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(SafeTLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfig. +func (in *HTTPConfig) DeepCopy() *HTTPConfig { + if in == nil { + return nil + } + out := new(HTTPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfigWithProxy) DeepCopyInto(out *HTTPConfigWithProxy) { + *out = *in + in.HTTPConfig.DeepCopyInto(&out.HTTPConfig) + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigWithProxy. +func (in *HTTPConfigWithProxy) DeepCopy() *HTTPConfigWithProxy { + if in == nil { + return nil + } + out := new(HTTPConfigWithProxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfigWithProxyAndTLSFiles) DeepCopyInto(out *HTTPConfigWithProxyAndTLSFiles) { + *out = *in + in.HTTPConfigWithTLSFiles.DeepCopyInto(&out.HTTPConfigWithTLSFiles) + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigWithProxyAndTLSFiles. 
+func (in *HTTPConfigWithProxyAndTLSFiles) DeepCopy() *HTTPConfigWithProxyAndTLSFiles { + if in == nil { + return nil + } + out := new(HTTPConfigWithProxyAndTLSFiles) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfigWithTLSFiles) DeepCopyInto(out *HTTPConfigWithTLSFiles) { + *out = *in + in.HTTPConfigWithoutTLS.DeepCopyInto(&out.HTTPConfigWithoutTLS) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigWithTLSFiles. +func (in *HTTPConfigWithTLSFiles) DeepCopy() *HTTPConfigWithTLSFiles { + if in == nil { + return nil + } + out := new(HTTPConfigWithTLSFiles) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfigWithoutTLS) DeepCopyInto(out *HTTPConfigWithoutTLS) { + *out = *in + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(OAuth2) + (*in).DeepCopyInto(*out) + } + if in.BearerTokenSecret != nil { + in, out := &in.BearerTokenSecret, &out.BearerTokenSecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigWithoutTLS. +func (in *HTTPConfigWithoutTLS) DeepCopy() *HTTPConfigWithoutTLS { + if in == nil { + return nil + } + out := new(HTTPConfigWithoutTLS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostAlias) DeepCopyInto(out *HostAlias) { + *out = *in + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias. +func (in *HostAlias) DeepCopy() *HostAlias { + if in == nil { + return nil + } + out := new(HostAlias) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPort) DeepCopyInto(out *HostPort) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPort. +func (in *HostPort) DeepCopy() *HostPort { + if in == nil { + return nil + } + out := new(HostPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedIdentity) DeepCopyInto(out *ManagedIdentity) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedIdentity. +func (in *ManagedIdentity) DeepCopy() *ManagedIdentity { + if in == nil { + return nil + } + out := new(ManagedIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataConfig) DeepCopyInto(out *MetadataConfig) { + *out = *in + if in.MaxSamplesPerSend != nil { + in, out := &in.MaxSamplesPerSend, &out.MaxSamplesPerSend + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataConfig. +func (in *MetadataConfig) DeepCopy() *MetadataConfig { + if in == nil { + return nil + } + out := new(MetadataConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceSelector) DeepCopyInto(out *NamespaceSelector) { + *out = *in + if in.MatchNames != nil { + in, out := &in.MatchNames, &out.MatchNames + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSelector. +func (in *NamespaceSelector) DeepCopy() *NamespaceSelector { + if in == nil { + return nil + } + out := new(NamespaceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NativeHistogramConfig) DeepCopyInto(out *NativeHistogramConfig) { + *out = *in + if in.ScrapeNativeHistograms != nil { + in, out := &in.ScrapeNativeHistograms, &out.ScrapeNativeHistograms + *out = new(bool) + **out = **in + } + if in.ScrapeClassicHistograms != nil { + in, out := &in.ScrapeClassicHistograms, &out.ScrapeClassicHistograms + *out = new(bool) + **out = **in + } + if in.NativeHistogramBucketLimit != nil { + in, out := &in.NativeHistogramBucketLimit, &out.NativeHistogramBucketLimit + *out = new(uint64) + **out = **in + } + if in.NativeHistogramMinBucketFactor != nil { + in, out := &in.NativeHistogramMinBucketFactor, &out.NativeHistogramMinBucketFactor + x := (*in).DeepCopy() + *out = &x + } + if in.ConvertClassicHistogramsToNHCB != nil { + in, out := &in.ConvertClassicHistogramsToNHCB, &out.ConvertClassicHistogramsToNHCB + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NativeHistogramConfig. +func (in *NativeHistogramConfig) DeepCopy() *NativeHistogramConfig { + if in == nil { + return nil + } + out := new(NativeHistogramConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuth2) DeepCopyInto(out *OAuth2) { + *out = *in + in.ClientID.DeepCopyInto(&out.ClientID) + in.ClientSecret.DeepCopyInto(&out.ClientSecret) + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EndpointParams != nil { + in, out := &in.EndpointParams, &out.EndpointParams + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth2. +func (in *OAuth2) DeepCopy() *OAuth2 { + if in == nil { + return nil + } + out := new(OAuth2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OTLPConfig) DeepCopyInto(out *OTLPConfig) { + *out = *in + if in.PromoteAllResourceAttributes != nil { + in, out := &in.PromoteAllResourceAttributes, &out.PromoteAllResourceAttributes + *out = new(bool) + **out = **in + } + if in.IgnoreResourceAttributes != nil { + in, out := &in.IgnoreResourceAttributes, &out.IgnoreResourceAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PromoteResourceAttributes != nil { + in, out := &in.PromoteResourceAttributes, &out.PromoteResourceAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TranslationStrategy != nil { + in, out := &in.TranslationStrategy, &out.TranslationStrategy + *out = new(TranslationStrategyOption) + **out = **in + } + if in.KeepIdentifyingResourceAttributes != nil { + in, out := &in.KeepIdentifyingResourceAttributes, &out.KeepIdentifyingResourceAttributes + *out = new(bool) + **out = **in + } + if in.ConvertHistogramsToNHCB != nil { + in, out := &in.ConvertHistogramsToNHCB, &out.ConvertHistogramsToNHCB + *out = new(bool) + **out = **in + } + if in.PromoteScopeMetadata != nil { + in, out := &in.PromoteScopeMetadata, &out.PromoteScopeMetadata + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPConfig. +func (in *OTLPConfig) DeepCopy() *OTLPConfig { + if in == nil { + return nil + } + out := new(OTLPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Searches != nil { + in, out := &in.Searches, &out.Searches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]PodDNSConfigOption, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig. +func (in *PodDNSConfig) DeepCopy() *PodDNSConfig { + if in == nil { + return nil + } + out := new(PodDNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption. +func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption { + if in == nil { + return nil + } + out := new(PodDNSConfigOption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetricsEndpoint) DeepCopyInto(out *PodMetricsEndpoint) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.PortNumber != nil { + in, out := &in.PortNumber, &out.PortNumber + *out = new(int32) + **out = **in + } + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(intstr.IntOrString) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(Scheme) + **out = **in + } + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.HonorTimestamps != nil { + in, out := &in.HonorTimestamps, &out.HonorTimestamps + *out = new(bool) + **out = **in + } + if in.TrackTimestampsStaleness != nil { + in, out := &in.TrackTimestampsStaleness, &out.TrackTimestampsStaleness + *out = new(bool) + **out = **in + } + if in.MetricRelabelConfigs != nil { + in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FilterRunning != nil { + in, out := &in.FilterRunning, &out.FilterRunning + *out = new(bool) + **out = **in + } + in.HTTPConfigWithProxy.DeepCopyInto(&out.HTTPConfigWithProxy) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsEndpoint. 
+func (in *PodMetricsEndpoint) DeepCopy() *PodMetricsEndpoint { + if in == nil { + return nil + } + out := new(PodMetricsEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMonitor) DeepCopyInto(out *PodMonitor) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMonitor. +func (in *PodMonitor) DeepCopy() *PodMonitor { + if in == nil { + return nil + } + out := new(PodMonitor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMonitorList) DeepCopyInto(out *PodMonitorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodMonitor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMonitorList. +func (in *PodMonitorList) DeepCopy() *PodMonitorList { + if in == nil { + return nil + } + out := new(PodMonitorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMonitorSpec) DeepCopyInto(out *PodMonitorSpec) { + *out = *in + if in.PodTargetLabels != nil { + in, out := &in.PodTargetLabels, &out.PodTargetLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PodMetricsEndpoints != nil { + in, out := &in.PodMetricsEndpoints, &out.PodMetricsEndpoints + *out = make([]PodMetricsEndpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Selector.DeepCopyInto(&out.Selector) + if in.SelectorMechanism != nil { + in, out := &in.SelectorMechanism, &out.SelectorMechanism + *out = new(SelectorMechanism) + **out = **in + } + in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) + if in.SampleLimit != nil { + in, out := &in.SampleLimit, &out.SampleLimit + *out = new(uint64) + **out = **in + } + if in.TargetLimit != nil { + in, out := &in.TargetLimit, &out.TargetLimit + *out = new(uint64) + **out = **in + } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]ScrapeProtocol, len(*in)) + copy(*out, *in) + } + if in.FallbackScrapeProtocol != nil { + in, out := &in.FallbackScrapeProtocol, &out.FallbackScrapeProtocol + *out = new(ScrapeProtocol) + **out = **in + } + if in.LabelLimit != nil { + in, out := &in.LabelLimit, &out.LabelLimit + *out = new(uint64) + **out = **in + } + if in.LabelNameLengthLimit != nil { + in, out := &in.LabelNameLengthLimit, &out.LabelNameLengthLimit + *out = new(uint64) + **out = **in + } + if in.LabelValueLengthLimit != nil { + in, out := &in.LabelValueLengthLimit, &out.LabelValueLengthLimit + *out = new(uint64) + **out = **in + } + in.NativeHistogramConfig.DeepCopyInto(&out.NativeHistogramConfig) + if in.KeepDroppedTargets != nil { + in, out := &in.KeepDroppedTargets, &out.KeepDroppedTargets + *out = new(uint64) + **out = **in + } + if in.AttachMetadata != nil { + in, out := &in.AttachMetadata, &out.AttachMetadata + *out = new(AttachMetadata) + (*in).DeepCopyInto(*out) + } + if 
in.ScrapeClassName != nil { + in, out := &in.ScrapeClassName, &out.ScrapeClassName + *out = new(string) + **out = **in + } + if in.BodySizeLimit != nil { + in, out := &in.BodySizeLimit, &out.BodySizeLimit + *out = new(ByteSize) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMonitorSpec. +func (in *PodMonitorSpec) DeepCopy() *PodMonitorSpec { + if in == nil { + return nil + } + out := new(PodMonitorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Probe) DeepCopyInto(out *Probe) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. +func (in *Probe) DeepCopy() *Probe { + if in == nil { + return nil + } + out := new(Probe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeList) DeepCopyInto(out *ProbeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Probe, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeList. +func (in *ProbeList) DeepCopy() *ProbeList { + if in == nil { + return nil + } + out := new(ProbeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeParam) DeepCopyInto(out *ProbeParam) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeParam. +func (in *ProbeParam) DeepCopy() *ProbeParam { + if in == nil { + return nil + } + out := new(ProbeParam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProbeSpec) DeepCopyInto(out *ProbeSpec) { + *out = *in + in.ProberSpec.DeepCopyInto(&out.ProberSpec) + in.Targets.DeepCopyInto(&out.Targets) + if in.MetricRelabelConfigs != nil { + in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.SampleLimit != nil { + in, out := &in.SampleLimit, &out.SampleLimit + *out = new(uint64) + **out = **in + } + if in.TargetLimit != nil { + in, out := &in.TargetLimit, &out.TargetLimit + *out = new(uint64) + **out = **in + } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]ScrapeProtocol, len(*in)) + copy(*out, *in) + } + if in.FallbackScrapeProtocol != nil { + in, out := &in.FallbackScrapeProtocol, &out.FallbackScrapeProtocol + *out = new(ScrapeProtocol) + **out = **in + } + if in.LabelLimit != nil { + in, out := &in.LabelLimit, &out.LabelLimit + *out = new(uint64) + **out = **in + } + if in.LabelNameLengthLimit != nil { + in, out := &in.LabelNameLengthLimit, &out.LabelNameLengthLimit + *out = new(uint64) + **out = **in + } + if in.LabelValueLengthLimit != nil { + in, out := &in.LabelValueLengthLimit, &out.LabelValueLengthLimit + *out = new(uint64) + **out = **in + } + in.NativeHistogramConfig.DeepCopyInto(&out.NativeHistogramConfig) + if in.KeepDroppedTargets != nil { + in, out := &in.KeepDroppedTargets, &out.KeepDroppedTargets + *out = new(uint64) + **out = **in + } + if in.ScrapeClassName != nil { + in, out := &in.ScrapeClassName, &out.ScrapeClassName + *out = new(string) + **out = **in + } + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]ProbeParam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.HTTPConfig.DeepCopyInto(&out.HTTPConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeSpec. +func (in *ProbeSpec) DeepCopy() *ProbeSpec { + if in == nil { + return nil + } + out := new(ProbeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeTargetIngress) DeepCopyInto(out *ProbeTargetIngress) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) + in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeTargetIngress. +func (in *ProbeTargetIngress) DeepCopy() *ProbeTargetIngress { + if in == nil { + return nil + } + out := new(ProbeTargetIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProbeTargetStaticConfig) DeepCopyInto(out *ProbeTargetStaticConfig) { + *out = *in + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeTargetStaticConfig. +func (in *ProbeTargetStaticConfig) DeepCopy() *ProbeTargetStaticConfig { + if in == nil { + return nil + } + out := new(ProbeTargetStaticConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeTargets) DeepCopyInto(out *ProbeTargets) { + *out = *in + if in.StaticConfig != nil { + in, out := &in.StaticConfig, &out.StaticConfig + *out = new(ProbeTargetStaticConfig) + (*in).DeepCopyInto(*out) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(ProbeTargetIngress) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeTargets. +func (in *ProbeTargets) DeepCopy() *ProbeTargets { + if in == nil { + return nil + } + out := new(ProbeTargets) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProberSpec) DeepCopyInto(out *ProberSpec) { + *out = *in + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(Scheme) + **out = **in + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProberSpec. +func (in *ProberSpec) DeepCopy() *ProberSpec { + if in == nil { + return nil + } + out := new(ProberSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Prometheus) DeepCopyInto(out *Prometheus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus. +func (in *Prometheus) DeepCopy() *Prometheus { + if in == nil { + return nil + } + out := new(Prometheus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusList) DeepCopyInto(out *PrometheusList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Prometheus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusList. 
+func (in *PrometheusList) DeepCopy() *PrometheusList { + if in == nil { + return nil + } + out := new(PrometheusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusRule) DeepCopyInto(out *PrometheusRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRule. +func (in *PrometheusRule) DeepCopy() *PrometheusRule { + if in == nil { + return nil + } + out := new(PrometheusRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusRuleExcludeConfig) DeepCopyInto(out *PrometheusRuleExcludeConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleExcludeConfig. +func (in *PrometheusRuleExcludeConfig) DeepCopy() *PrometheusRuleExcludeConfig { + if in == nil { + return nil + } + out := new(PrometheusRuleExcludeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusRuleList) DeepCopyInto(out *PrometheusRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PrometheusRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleList. +func (in *PrometheusRuleList) DeepCopy() *PrometheusRuleList { + if in == nil { + return nil + } + out := new(PrometheusRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusRuleSpec) DeepCopyInto(out *PrometheusRuleSpec) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]RuleGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleSpec. +func (in *PrometheusRuleSpec) DeepCopy() *PrometheusRuleSpec { + if in == nil { + return nil + } + out := new(PrometheusRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) { + *out = *in + in.CommonPrometheusFields.DeepCopyInto(&out.CommonPrometheusFields) + if in.ShardRetentionPolicy != nil { + in, out := &in.ShardRetentionPolicy, &out.ShardRetentionPolicy + *out = new(ShardRetentionPolicy) + (*in).DeepCopyInto(*out) + } + out.Rules = in.Rules + if in.PrometheusRulesExcludedFromEnforce != nil { + in, out := &in.PrometheusRulesExcludedFromEnforce, &out.PrometheusRulesExcludedFromEnforce + *out = make([]PrometheusRuleExcludeConfig, len(*in)) + copy(*out, *in) + } + if in.RuleSelector != nil { + in, out := &in.RuleSelector, &out.RuleSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.RuleNamespaceSelector != nil { + in, out := &in.RuleNamespaceSelector, &out.RuleNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(QuerySpec) + (*in).DeepCopyInto(*out) + } + if in.Alerting != nil { + in, out := &in.Alerting, &out.Alerting + *out = new(AlertingSpec) + (*in).DeepCopyInto(*out) + } + if in.AdditionalAlertRelabelConfigs != nil { + in, out := &in.AdditionalAlertRelabelConfigs, &out.AdditionalAlertRelabelConfigs + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.AdditionalAlertManagerConfigs != nil { + in, out := &in.AdditionalAlertManagerConfigs, &out.AdditionalAlertManagerConfigs + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.RemoteRead != nil { + in, out := &in.RemoteRead, &out.RemoteRead + *out = make([]RemoteReadSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Thanos != nil { + in, out := &in.Thanos, &out.Thanos + *out = new(ThanosSpec) + (*in).DeepCopyInto(*out) + } + if in.Exemplars != nil { + in, out := &in.Exemplars, &out.Exemplars + *out = new(Exemplars) + (*in).DeepCopyInto(*out) + } + if in.RuleQueryOffset != nil { + in, out := &in.RuleQueryOffset, &out.RuleQueryOffset + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec. +func (in *PrometheusSpec) DeepCopy() *PrometheusSpec { + if in == nil { + return nil + } + out := new(PrometheusSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusStatus) DeepCopyInto(out *PrometheusStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShardStatuses != nil { + in, out := &in.ShardStatuses, &out.ShardStatuses + *out = make([]ShardStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusStatus. +func (in *PrometheusStatus) DeepCopy() *PrometheusStatus { + if in == nil { + return nil + } + out := new(PrometheusStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrometheusWebSpec) DeepCopyInto(out *PrometheusWebSpec) { + *out = *in + in.WebConfigFileFields.DeepCopyInto(&out.WebConfigFileFields) + if in.PageTitle != nil { + in, out := &in.PageTitle, &out.PageTitle + *out = new(string) + **out = **in + } + if in.MaxConnections != nil { + in, out := &in.MaxConnections, &out.MaxConnections + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusWebSpec. +func (in *PrometheusWebSpec) DeepCopy() *PrometheusWebSpec { + if in == nil { + return nil + } + out := new(PrometheusWebSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.ProxyURL != nil { + in, out := &in.ProxyURL, &out.ProxyURL + *out = new(string) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = new(string) + **out = **in + } + if in.ProxyFromEnvironment != nil { + in, out := &in.ProxyFromEnvironment, &out.ProxyFromEnvironment + *out = new(bool) + **out = **in + } + if in.ProxyConnectHeader != nil { + in, out := &in.ProxyConnectHeader, &out.ProxyConnectHeader + *out = make(map[string][]corev1.SecretKeySelector, len(*in)) + for key, val := range *in { + var outVal []corev1.SecretKeySelector + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make([]corev1.SecretKeySelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuerySpec) DeepCopyInto(out *QuerySpec) { + *out = *in + if in.LookbackDelta != nil { + in, out := &in.LookbackDelta, &out.LookbackDelta + *out = new(string) + **out = **in + } + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(int32) + **out = **in + } + if in.MaxSamples != nil { + in, out := &in.MaxSamples, &out.MaxSamples + *out = new(int32) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuerySpec. +func (in *QuerySpec) DeepCopy() *QuerySpec { + if in == nil { + return nil + } + out := new(QuerySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueConfig) DeepCopyInto(out *QueueConfig) { + *out = *in + if in.BatchSendDeadline != nil { + in, out := &in.BatchSendDeadline, &out.BatchSendDeadline + *out = new(Duration) + **out = **in + } + if in.MinBackoff != nil { + in, out := &in.MinBackoff, &out.MinBackoff + *out = new(Duration) + **out = **in + } + if in.MaxBackoff != nil { + in, out := &in.MaxBackoff, &out.MaxBackoff + *out = new(Duration) + **out = **in + } + if in.SampleAgeLimit != nil { + in, out := &in.SampleAgeLimit, &out.SampleAgeLimit + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfig. +func (in *QueueConfig) DeepCopy() *QueueConfig { + if in == nil { + return nil + } + out := new(QueueConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RelabelConfig) DeepCopyInto(out *RelabelConfig) { + *out = *in + if in.SourceLabels != nil { + in, out := &in.SourceLabels, &out.SourceLabels + *out = make([]LabelName, len(*in)) + copy(*out, *in) + } + if in.Separator != nil { + in, out := &in.Separator, &out.Separator + *out = new(string) + **out = **in + } + if in.Replacement != nil { + in, out := &in.Replacement, &out.Replacement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelabelConfig. +func (in *RelabelConfig) DeepCopy() *RelabelConfig { + if in == nil { + return nil + } + out := new(RelabelConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteReadSpec) DeepCopyInto(out *RemoteReadSpec) { + *out = *in + if in.RequiredMatchers != nil { + in, out := &in.RequiredMatchers, &out.RequiredMatchers + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RemoteTimeout != nil { + in, out := &in.RemoteTimeout, &out.RemoteTimeout + *out = new(Duration) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(OAuth2) + (*in).DeepCopyInto(*out) + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(Authorization) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.FilterExternalLabels != nil { + in, out := &in.FilterExternalLabels, &out.FilterExternalLabels + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteReadSpec. +func (in *RemoteReadSpec) DeepCopy() *RemoteReadSpec { + if in == nil { + return nil + } + out := new(RemoteReadSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RemoteWriteSpec) DeepCopyInto(out *RemoteWriteSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.MessageVersion != nil { + in, out := &in.MessageVersion, &out.MessageVersion + *out = new(RemoteWriteMessageVersion) + **out = **in + } + if in.SendExemplars != nil { + in, out := &in.SendExemplars, &out.SendExemplars + *out = new(bool) + **out = **in + } + if in.SendNativeHistograms != nil { + in, out := &in.SendNativeHistograms, &out.SendNativeHistograms + *out = new(bool) + **out = **in + } + if in.RemoteTimeout != nil { + in, out := &in.RemoteTimeout, &out.RemoteTimeout + *out = new(Duration) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.WriteRelabelConfigs != nil { + in, out := &in.WriteRelabelConfigs, &out.WriteRelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(OAuth2) + (*in).DeepCopyInto(*out) + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(Authorization) + (*in).DeepCopyInto(*out) + } + if in.Sigv4 != nil { + in, out := &in.Sigv4, &out.Sigv4 + *out = new(Sigv4) + (*in).DeepCopyInto(*out) + } + if in.AzureAD != nil { + in, out := &in.AzureAD, &out.AzureAD + *out = new(AzureAD) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.QueueConfig != nil { + in, out := &in.QueueConfig, &out.QueueConfig + *out = new(QueueConfig) + (*in).DeepCopyInto(*out) + } + if in.MetadataConfig != nil { + in, out := &in.MetadataConfig, &out.MetadataConfig + *out = new(MetadataConfig) + (*in).DeepCopyInto(*out) + } + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } + if in.RoundRobinDNS != nil { + in, out := &in.RoundRobinDNS, &out.RoundRobinDNS + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteWriteSpec. +func (in *RemoteWriteSpec) DeepCopy() *RemoteWriteSpec { + if in == nil { + return nil + } + out := new(RemoteWriteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetainConfig) DeepCopyInto(out *RetainConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetainConfig. +func (in *RetainConfig) DeepCopy() *RetainConfig { + if in == nil { + return nil + } + out := new(RetainConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. +func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStatefulSetStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + out.Expr = in.Expr + if in.For != nil { + in, out := &in.For, &out.For + *out = new(Duration) + **out = **in + } + if in.KeepFiringFor != nil { + in, out := &in.KeepFiringFor, &out.KeepFiringFor + *out = new(NonEmptyDuration) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. +func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroup) DeepCopyInto(out *RuleGroup) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(Duration) + **out = **in + } + if in.QueryOffset != nil { + in, out := &in.QueryOffset, &out.QueryOffset + *out = new(Duration) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]Rule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Limit != nil { + in, out := &in.Limit, &out.Limit + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroup. +func (in *RuleGroup) DeepCopy() *RuleGroup { + if in == nil { + return nil + } + out := new(RuleGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rules) DeepCopyInto(out *Rules) { + *out = *in + out.Alert = in.Alert +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rules. +func (in *Rules) DeepCopy() *Rules { + if in == nil { + return nil + } + out := new(Rules) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulesAlert) DeepCopyInto(out *RulesAlert) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesAlert. 
+func (in *RulesAlert) DeepCopy() *RulesAlert { + if in == nil { + return nil + } + out := new(RulesAlert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeConfig) DeepCopyInto(out *RuntimeConfig) { + *out = *in + if in.GoGC != nil { + in, out := &in.GoGC, &out.GoGC + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeConfig. +func (in *RuntimeConfig) DeepCopy() *RuntimeConfig { + if in == nil { + return nil + } + out := new(RuntimeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SafeAuthorization) DeepCopyInto(out *SafeAuthorization) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SafeAuthorization. +func (in *SafeAuthorization) DeepCopy() *SafeAuthorization { + if in == nil { + return nil + } + out := new(SafeAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SafeTLSConfig) DeepCopyInto(out *SafeTLSConfig) { + *out = *in + in.CA.DeepCopyInto(&out.CA) + in.Cert.DeepCopyInto(&out.Cert) + if in.KeySecret != nil { + in, out := &in.KeySecret, &out.KeySecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ServerName != nil { + in, out := &in.ServerName, &out.ServerName + *out = new(string) + **out = **in + } + if in.InsecureSkipVerify != nil { + in, out := &in.InsecureSkipVerify, &out.InsecureSkipVerify + *out = new(bool) + **out = **in + } + if in.MinVersion != nil { + in, out := &in.MinVersion, &out.MinVersion + *out = new(TLSVersion) + **out = **in + } + if in.MaxVersion != nil { + in, out := &in.MaxVersion, &out.MaxVersion + *out = new(TLSVersion) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SafeTLSConfig. +func (in *SafeTLSConfig) DeepCopy() *SafeTLSConfig { + if in == nil { + return nil + } + out := new(SafeTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScrapeClass) DeepCopyInto(out *ScrapeClass) { + *out = *in + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(bool) + **out = **in + } + if in.FallbackScrapeProtocol != nil { + in, out := &in.FallbackScrapeProtocol, &out.FallbackScrapeProtocol + *out = new(ScrapeProtocol) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(Authorization) + (*in).DeepCopyInto(*out) + } + if in.Relabelings != nil { + in, out := &in.Relabelings, &out.Relabelings + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricRelabelings != nil { + in, out := &in.MetricRelabelings, &out.MetricRelabelings + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AttachMetadata != nil { + in, out := &in.AttachMetadata, &out.AttachMetadata + *out = new(AttachMetadata) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScrapeClass. +func (in *ScrapeClass) DeepCopy() *ScrapeClass { + if in == nil { + return nil + } + out := new(ScrapeClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretOrConfigMap) DeepCopyInto(out *SecretOrConfigMap) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(corev1.ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretOrConfigMap. +func (in *SecretOrConfigMap) DeepCopy() *SecretOrConfigMap { + if in == nil { + return nil + } + out := new(SecretOrConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceMonitor) DeepCopyInto(out *ServiceMonitor) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitor. +func (in *ServiceMonitor) DeepCopy() *ServiceMonitor { + if in == nil { + return nil + } + out := new(ServiceMonitor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceMonitorList) DeepCopyInto(out *ServiceMonitorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceMonitor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitorList. 
+func (in *ServiceMonitorList) DeepCopy() *ServiceMonitorList { + if in == nil { + return nil + } + out := new(ServiceMonitorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceMonitorSpec) DeepCopyInto(out *ServiceMonitorSpec) { + *out = *in + if in.TargetLabels != nil { + in, out := &in.TargetLabels, &out.TargetLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PodTargetLabels != nil { + in, out := &in.PodTargetLabels, &out.PodTargetLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Selector.DeepCopyInto(&out.Selector) + if in.SelectorMechanism != nil { + in, out := &in.SelectorMechanism, &out.SelectorMechanism + *out = new(SelectorMechanism) + **out = **in + } + in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) + if in.SampleLimit != nil { + in, out := &in.SampleLimit, &out.SampleLimit + *out = new(uint64) + **out = **in + } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]ScrapeProtocol, len(*in)) + copy(*out, *in) + } + if in.FallbackScrapeProtocol != nil { + in, out := &in.FallbackScrapeProtocol, &out.FallbackScrapeProtocol + *out = new(ScrapeProtocol) + **out = **in + } + if in.TargetLimit != nil { + in, out := &in.TargetLimit, &out.TargetLimit + *out = new(uint64) + **out = **in + } + if in.LabelLimit != nil { + in, out := &in.LabelLimit, &out.LabelLimit + *out = new(uint64) + **out = **in + } + if in.LabelNameLengthLimit != nil { + in, out := &in.LabelNameLengthLimit, &out.LabelNameLengthLimit + *out = new(uint64) + **out = **in + } + if in.LabelValueLengthLimit != nil { + in, out := &in.LabelValueLengthLimit, &out.LabelValueLengthLimit + *out = new(uint64) + **out = **in + } + in.NativeHistogramConfig.DeepCopyInto(&out.NativeHistogramConfig) + if in.KeepDroppedTargets != nil { + in, out := &in.KeepDroppedTargets, &out.KeepDroppedTargets + *out = new(uint64) + **out = **in + } + if in.AttachMetadata != nil { + in, out := &in.AttachMetadata, &out.AttachMetadata + *out = new(AttachMetadata) + (*in).DeepCopyInto(*out) + } + if in.ScrapeClassName != nil { + in, out := &in.ScrapeClassName, &out.ScrapeClassName + *out = new(string) + **out = **in + } + if in.BodySizeLimit != nil { + in, out := &in.BodySizeLimit, &out.BodySizeLimit + *out = new(ByteSize) + **out = **in + } + if in.ServiceDiscoveryRole != nil { + in, out := &in.ServiceDiscoveryRole, &out.ServiceDiscoveryRole + *out = new(ServiceDiscoveryRole) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitorSpec. +func (in *ServiceMonitorSpec) DeepCopy() *ServiceMonitorSpec { + if in == nil { + return nil + } + out := new(ServiceMonitorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShardRetentionPolicy) DeepCopyInto(out *ShardRetentionPolicy) { + *out = *in + if in.WhenScaled != nil { + in, out := &in.WhenScaled, &out.WhenScaled + *out = new(WhenScaledRetentionType) + **out = **in + } + if in.Retain != nil { + in, out := &in.Retain, &out.Retain + *out = new(RetainConfig) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardRetentionPolicy. +func (in *ShardRetentionPolicy) DeepCopy() *ShardRetentionPolicy { + if in == nil { + return nil + } + out := new(ShardRetentionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardStatus) DeepCopyInto(out *ShardStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardStatus. +func (in *ShardStatus) DeepCopy() *ShardStatus { + if in == nil { + return nil + } + out := new(ShardStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sigv4) DeepCopyInto(out *Sigv4) { + *out = *in + if in.AccessKey != nil { + in, out := &in.AccessKey, &out.AccessKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.SecretKey != nil { + in, out := &in.SecretKey, &out.SecretKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.UseFIPSSTSEndpoint != nil { + in, out := &in.UseFIPSSTSEndpoint, &out.UseFIPSSTSEndpoint + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sigv4. +func (in *Sigv4) DeepCopy() *Sigv4 { + if in == nil { + return nil + } + out := new(Sigv4) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateStatefulSetStrategy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. +func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { + if in == nil { + return nil + } + out := new(StatefulSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { + *out = *in + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(corev1.EmptyDirVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Ephemeral != nil { + in, out := &in.Ephemeral, &out.Ephemeral + *out = new(corev1.EphemeralVolumeSource) + (*in).DeepCopyInto(*out) + } + in.VolumeClaimTemplate.DeepCopyInto(&out.VolumeClaimTemplate) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { + if in == nil { + return nil + } + out := new(StorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { + *out = *in + in.SafeTLSConfig.DeepCopyInto(&out.SafeTLSConfig) + out.TLSFilesConfig = in.TLSFilesConfig +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. +func (in *TLSConfig) DeepCopy() *TLSConfig { + if in == nil { + return nil + } + out := new(TLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSFilesConfig) DeepCopyInto(out *TLSFilesConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSFilesConfig. +func (in *TLSFilesConfig) DeepCopy() *TLSFilesConfig { + if in == nil { + return nil + } + out := new(TLSFilesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TSDBSpec) DeepCopyInto(out *TSDBSpec) { + *out = *in + if in.OutOfOrderTimeWindow != nil { + in, out := &in.OutOfOrderTimeWindow, &out.OutOfOrderTimeWindow + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TSDBSpec. +func (in *TSDBSpec) DeepCopy() *TSDBSpec { + if in == nil { + return nil + } + out := new(TSDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThanosRuler) DeepCopyInto(out *ThanosRuler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRuler. +func (in *ThanosRuler) DeepCopy() *ThanosRuler { + if in == nil { + return nil + } + out := new(ThanosRuler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThanosRulerList) DeepCopyInto(out *ThanosRulerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThanosRuler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRulerList. +func (in *ThanosRulerList) DeepCopy() *ThanosRulerList { + if in == nil { + return nil + } + out := new(ThanosRulerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThanosRulerSpec) DeepCopyInto(out *ThanosRulerSpec) { + *out = *in + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.PodMetadata != nil { + in, out := &in.PodMetadata, &out.PodMetadata + *out = new(EmbeddedObjectMetadata) + (*in).DeepCopyInto(*out) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(DNSPolicy) + **out = **in + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageSpec) + (*in).DeepCopyInto(*out) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]corev1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ObjectStorageConfig != nil { + in, out := &in.ObjectStorageConfig, &out.ObjectStorageConfig + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectStorageConfigFile != nil { + in, out := &in.ObjectStorageConfigFile, &out.ObjectStorageConfigFile + *out = new(string) + **out = **in + } + if in.PodManagementPolicy != nil { + in, out := &in.PodManagementPolicy, &out.PodManagementPolicy + *out = new(PodManagementPolicyType) + **out = **in + } + if in.UpdateStrategy != nil { + in, out := &in.UpdateStrategy, &out.UpdateStrategy + *out = new(StatefulSetUpdateStrategy) + (*in).DeepCopyInto(*out) + } + if in.QueryEndpoints != nil { + in, out := &in.QueryEndpoints, &out.QueryEndpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.QueryConfig != nil { + in, out := &in.QueryConfig, &out.QueryConfig + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.AlertManagersURL != nil { + in, out := &in.AlertManagersURL, &out.AlertManagersURL + *out = 
make([]string, len(*in)) + copy(*out, *in) + } + if in.AlertManagersConfig != nil { + in, out := &in.AlertManagersConfig, &out.AlertManagersConfig + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.RuleSelector != nil { + in, out := &in.RuleSelector, &out.RuleSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.RuleNamespaceSelector != nil { + in, out := &in.RuleNamespaceSelector, &out.RuleNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ExcludedFromEnforcement != nil { + in, out := &in.ExcludedFromEnforcement, &out.ExcludedFromEnforcement + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.PrometheusRulesExcludedFromEnforce != nil { + in, out := &in.PrometheusRulesExcludedFromEnforce, &out.PrometheusRulesExcludedFromEnforce + *out = make([]PrometheusRuleExcludeConfig, len(*in)) + copy(*out, *in) + } + if in.ResendDelay != nil { + in, out := &in.ResendDelay, &out.ResendDelay + *out = new(Duration) + **out = **in + } + if in.RuleOutageTolerance != nil { + in, out := &in.RuleOutageTolerance, &out.RuleOutageTolerance + *out = new(Duration) + **out = **in + } + if in.RuleQueryOffset != nil { + in, out := &in.RuleQueryOffset, &out.RuleQueryOffset + *out = new(Duration) + **out = **in + } + if in.RuleConcurrentEval != nil { + in, out := &in.RuleConcurrentEval, &out.RuleConcurrentEval + *out = new(int32) + **out = **in + } + if in.RuleGracePeriod != nil { + in, out := &in.RuleGracePeriod, &out.RuleGracePeriod + *out = new(Duration) + **out = **in + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TracingConfig != nil { + in, out := &in.TracingConfig, &out.TracingConfig + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AlertDropLabels != nil { + in, out := &in.AlertDropLabels, &out.AlertDropLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GRPCServerTLSConfig != nil { + in, out := &in.GRPCServerTLSConfig, &out.GRPCServerTLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.MinReadySeconds != nil { + in, out := &in.MinReadySeconds, &out.MinReadySeconds + *out = new(int32) + **out = **in + } + if in.AlertRelabelConfigs != nil { + in, out := &in.AlertRelabelConfigs, &out.AlertRelabelConfigs + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.AlertRelabelConfigFile != nil { + in, out := &in.AlertRelabelConfigFile, &out.AlertRelabelConfigFile + *out = new(string) + **out = **in + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]HostAlias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalArgs != nil { + in, out := &in.AdditionalArgs, &out.AdditionalArgs + *out = make([]Argument, len(*in)) + copy(*out, *in) + } + if in.Web != nil { + in, out := &in.Web, &out.Web + *out = new(ThanosRulerWebSpec) + (*in).DeepCopyInto(*out) + } + if in.RemoteWrite != nil { + in, out := &in.RemoteWrite, &out.RemoteWrite + *out = 
make([]RemoteWriteSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.EnableFeatures != nil { + in, out := &in.EnableFeatures, &out.EnableFeatures + *out = make([]EnableFeature, len(*in)) + copy(*out, *in) + } + if in.HostUsers != nil { + in, out := &in.HostUsers, &out.HostUsers + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRulerSpec. +func (in *ThanosRulerSpec) DeepCopy() *ThanosRulerSpec { + if in == nil { + return nil + } + out := new(ThanosRulerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThanosRulerStatus) DeepCopyInto(out *ThanosRulerStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRulerStatus. +func (in *ThanosRulerStatus) DeepCopy() *ThanosRulerStatus { + if in == nil { + return nil + } + out := new(ThanosRulerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThanosRulerWebSpec) DeepCopyInto(out *ThanosRulerWebSpec) { + *out = *in + in.WebConfigFileFields.DeepCopyInto(&out.WebConfigFileFields) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRulerWebSpec. +func (in *ThanosRulerWebSpec) DeepCopy() *ThanosRulerWebSpec { + if in == nil { + return nil + } + out := new(ThanosRulerWebSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThanosSpec) DeepCopyInto(out *ThanosSpec) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.SHA != nil { + in, out := &in.SHA, &out.SHA + *out = new(string) + **out = **in + } + if in.BaseImage != nil { + in, out := &in.BaseImage, &out.BaseImage + *out = new(string) + **out = **in + } + in.Resources.DeepCopyInto(&out.Resources) + if in.ObjectStorageConfig != nil { + in, out := &in.ObjectStorageConfig, &out.ObjectStorageConfig + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectStorageConfigFile != nil { + in, out := &in.ObjectStorageConfigFile, &out.ObjectStorageConfigFile + *out = new(string) + **out = **in + } + if in.TracingConfig != nil { + in, out := &in.TracingConfig, &out.TracingConfig + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.GRPCServerTLSConfig != nil { + in, out := &in.GRPCServerTLSConfig, &out.GRPCServerTLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalArgs != nil { + in, out := &in.AdditionalArgs, &out.AdditionalArgs + *out = make([]Argument, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosSpec. +func (in *ThanosSpec) DeepCopy() *ThanosSpec { + if in == nil { + return nil + } + out := new(ThanosSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) { + *out = *in + in.CoreV1TopologySpreadConstraint.DeepCopyInto(&out.CoreV1TopologySpreadConstraint) + if in.AdditionalLabelSelectors != nil { + in, out := &in.AdditionalLabelSelectors, &out.AdditionalLabelSelectors + *out = new(AdditionalLabelSelectors) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint. +func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint { + if in == nil { + return nil + } + out := new(TopologySpreadConstraint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TracingConfig) DeepCopyInto(out *TracingConfig) { + *out = *in + if in.ClientType != nil { + in, out := &in.ClientType, &out.ClientType + *out = new(string) + **out = **in + } + if in.SamplingFraction != nil { + in, out := &in.SamplingFraction, &out.SamplingFraction + x := (*in).DeepCopy() + *out = &x + } + if in.Insecure != nil { + in, out := &in.Insecure, &out.Insecure + *out = new(bool) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(Duration) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfig. +func (in *TracingConfig) DeepCopy() *TracingConfig { + if in == nil { + return nil + } + out := new(TracingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebConfigFileFields) DeepCopyInto(out *WebConfigFileFields) { + *out = *in + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(WebTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(WebHTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebConfigFileFields. +func (in *WebConfigFileFields) DeepCopy() *WebConfigFileFields { + if in == nil { + return nil + } + out := new(WebConfigFileFields) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebHTTPConfig) DeepCopyInto(out *WebHTTPConfig) { + *out = *in + if in.HTTP2 != nil { + in, out := &in.HTTP2, &out.HTTP2 + *out = new(bool) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = new(WebHTTPHeaders) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebHTTPConfig. +func (in *WebHTTPConfig) DeepCopy() *WebHTTPConfig { + if in == nil { + return nil + } + out := new(WebHTTPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebHTTPHeaders) DeepCopyInto(out *WebHTTPHeaders) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebHTTPHeaders. +func (in *WebHTTPHeaders) DeepCopy() *WebHTTPHeaders { + if in == nil { + return nil + } + out := new(WebHTTPHeaders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebTLSConfig) DeepCopyInto(out *WebTLSConfig) { + *out = *in + in.Cert.DeepCopyInto(&out.Cert) + if in.CertFile != nil { + in, out := &in.CertFile, &out.CertFile + *out = new(string) + **out = **in + } + in.KeySecret.DeepCopyInto(&out.KeySecret) + if in.KeyFile != nil { + in, out := &in.KeyFile, &out.KeyFile + *out = new(string) + **out = **in + } + in.ClientCA.DeepCopyInto(&out.ClientCA) + if in.ClientCAFile != nil { + in, out := &in.ClientCAFile, &out.ClientCAFile + *out = new(string) + **out = **in + } + if in.ClientAuthType != nil { + in, out := &in.ClientAuthType, &out.ClientAuthType + *out = new(string) + **out = **in + } + if in.MinVersion != nil { + in, out := &in.MinVersion, &out.MinVersion + *out = new(string) + **out = **in + } + if in.MaxVersion != nil { + in, out := &in.MaxVersion, &out.MaxVersion + *out = new(string) + **out = **in + } + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferServerCipherSuites != nil { + in, out := &in.PreferServerCipherSuites, &out.PreferServerCipherSuites + *out = new(bool) + **out = **in + } + if in.CurvePreferences != nil { + in, out := &in.CurvePreferences, &out.CurvePreferences + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebTLSConfig. +func (in *WebTLSConfig) DeepCopy() *WebTLSConfig { + if in == nil { + return nil + } + out := new(WebTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkloadBinding) DeepCopyInto(out *WorkloadBinding) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ConfigResourceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadBinding. +func (in *WorkloadBinding) DeepCopy() *WorkloadBinding { + if in == nil { + return nil + } + out := new(WorkloadBinding) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/alertmanager_config_conversion.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/alertmanager_config_conversion.go new file mode 100644 index 0000000000..9e7e59f6e7 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/alertmanager_config_conversion.go @@ -0,0 +1,18 @@ +// Copyright 2022 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +// Hub marks this type as a conversion hub. 
+func (*AlertmanagerConfig) Hub() {} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/alertmanager_config_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/alertmanager_config_types.go new file mode 100644 index 0000000000..ccd9ec8df6 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/alertmanager_config_types.go @@ -0,0 +1,1680 @@ +// Copyright 2020 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "html/template" + "regexp" + "strings" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + + v1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" +) + +const ( + Version = "v1alpha1" + + AlertmanagerConfigKind = "AlertmanagerConfig" + AlertmanagerConfigName = "alertmanagerconfigs" + AlertmanagerConfigKindKey = "alertmanagerconfig" +) + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="amcfg" +// +kubebuilder:storageversion +// +kubebuilder:subresource:status + +// AlertmanagerConfig configures the Prometheus Alertmanager, +// specifying how alerts should be grouped, inhibited and notified to external systems. +type AlertmanagerConfig struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of AlertmanagerConfigSpec + // +required + Spec AlertmanagerConfigSpec `json:"spec"` + // status defines the status subresource. It is under active development and is updated only when the + // "StatusForConfigurationResources" feature gate is enabled. + // + // Most recent observed status of the ServiceMonitor. Read-only. + // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status monitoringv1.ConfigResourceStatus `json:"status,omitempty,omitzero"` +} + +// AlertmanagerConfigList is a list of AlertmanagerConfig. +// +k8s:openapi-gen=true +type AlertmanagerConfigList struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + metav1.ListMeta `json:"metadata,omitempty"` + // List of AlertmanagerConfig + Items []AlertmanagerConfig `json:"items"` +} + +// AlertmanagerConfigSpec is a specification of the desired behavior of the +// Alertmanager configuration. 
+// By default, the Alertmanager configuration only applies to alerts for which +// the `namespace` label is equal to the namespace of the AlertmanagerConfig +// resource (see the `.spec.alertmanagerConfigMatcherStrategy` field of the +// Alertmanager CRD). +type AlertmanagerConfigSpec struct { + // route defines the Alertmanager route definition for alerts matching the resource's + // namespace. If present, it will be added to the generated Alertmanager + // configuration as a first-level route. + // +optional + Route *Route `json:"route"` + // receivers defines the list of receivers. + // +listType=map + // +listMapKey=name + // +optional + Receivers []Receiver `json:"receivers"` + // inhibitRules defines the list of inhibition rules. The rules will only apply to alerts matching + // the resource's namespace. + // +listType=atomic + // +optional + InhibitRules []InhibitRule `json:"inhibitRules,omitempty"` + // muteTimeIntervals defines the list of MuteTimeInterval specifying when the routes should be muted. + // +listType=atomic + // +optional + MuteTimeIntervals []MuteTimeInterval `json:"muteTimeIntervals,omitempty"` +} + +// Route defines a node in the routing tree. +type Route struct { + // receiver defines the name of the receiver for this route. If not empty, it should be listed in + // the `receivers` field. + // +optional + Receiver string `json:"receiver"` + // groupBy defines the list of labels to group by. + // Labels must not be repeated (unique list). + // Special label "..." (aggregate by all possible labels), if provided, must be the only element in the list. + // +listType=set + // +optional + GroupBy []string `json:"groupBy,omitempty"` + // groupWait defines how long to wait before sending the initial notification. + // Must match the regular expression`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` + // Example: "30s" + // +optional + GroupWait string `json:"groupWait,omitempty"` + // groupInterval defines how long to wait before sending an updated notification. + // Must match the regular expression`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` + // Example: "5m" + // +optional + GroupInterval string `json:"groupInterval,omitempty"` + // repeatInterval defines how long to wait before repeating the last notification. + // Must match the regular expression`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` + // Example: "4h" + // +optional + RepeatInterval string `json:"repeatInterval,omitempty"` + // matchers defines the list of matchers that the alert's labels should match. For the first + // level route, the operator removes any existing equality and regexp + // matcher on the `namespace` label and adds a `namespace: ` matcher. + // +listType=atomic + // +optional + Matchers []Matcher `json:"matchers,omitempty"` + // continue defines the boolean indicating whether an alert should continue matching subsequent + // sibling nodes. It will always be overridden to true for the first-level + // route by the Prometheus operator. + // +optional + Continue bool `json:"continue,omitempty"` // nolint:kubeapilinter + // routes defines the child routes. + // +listType=atomic + // +optional + Routes []apiextensionsv1.JSON `json:"routes,omitempty"` + // Note: this comment applies to the field definition above but appears + // below otherwise it gets included in the generated manifest. 
+ // CRD schema doesn't support self-referential types for now (see + // https://github.com/kubernetes/kubernetes/issues/62872). We have to use + // an alternative type to circumvent the limitation. The downside is that + // the Kube API can't validate the data beyond the fact that it is a valid + // JSON representation. + + // muteTimeIntervals is a list of MuteTimeInterval names that will mute this route when matched, + // +listType=set + // +optional + MuteTimeIntervals []string `json:"muteTimeIntervals,omitempty"` + // activeTimeIntervals is a list of MuteTimeInterval names when this route should be active. + // +listType=set + // +optional + ActiveTimeIntervals []string `json:"activeTimeIntervals,omitempty"` +} + +// ChildRoutes extracts the child routes. +func (r *Route) ChildRoutes() ([]Route, error) { + out := make([]Route, len(r.Routes)) + + for i, v := range r.Routes { + dec := json.NewDecoder(bytes.NewBuffer(v.Raw)) + dec.DisallowUnknownFields() + if err := dec.Decode(&out[i]); err != nil { + return nil, fmt.Errorf("route[%d]: %w", i, err) + } + } + + return out, nil +} + +// Receiver defines one or more notification integrations. +type Receiver struct { + // name defines the name of the receiver. Must be unique across all items from the list. + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + // opsgenieConfigs defines the list of OpsGenie configurations. + // +listType=atomic + // +optional + OpsGenieConfigs []OpsGenieConfig `json:"opsgenieConfigs,omitempty"` + // pagerdutyConfigs defines the List of PagerDuty configurations. + // +listType=atomic + // +optional + PagerDutyConfigs []PagerDutyConfig `json:"pagerdutyConfigs,omitempty"` + // discordConfigs defines the list of Slack configurations. + // +listType=atomic + // +optional + DiscordConfigs []DiscordConfig `json:"discordConfigs,omitempty"` + // slackConfigs defines the list of Slack configurations. + // +listType=atomic + // +optional + SlackConfigs []SlackConfig `json:"slackConfigs,omitempty"` + // webhookConfigs defines the List of webhook configurations. + // +listType=atomic + // +optional + WebhookConfigs []WebhookConfig `json:"webhookConfigs,omitempty"` + // wechatConfigs defines the list of WeChat configurations. + // +listType=atomic + // +optional + WeChatConfigs []WeChatConfig `json:"wechatConfigs,omitempty"` + // emailConfigs defines the list of Email configurations. + // +listType=atomic + // +optional + EmailConfigs []EmailConfig `json:"emailConfigs,omitempty"` + // victoropsConfigs defines the list of VictorOps configurations. + // +listType=atomic + // +optional + VictorOpsConfigs []VictorOpsConfig `json:"victoropsConfigs,omitempty"` + // pushoverConfigs defines the list of Pushover configurations. + // +listType=atomic + // +optional + PushoverConfigs []PushoverConfig `json:"pushoverConfigs,omitempty"` + // snsConfigs defines the list of SNS configurations + // +listType=atomic + // +optional + SNSConfigs []SNSConfig `json:"snsConfigs,omitempty"` + // telegramConfigs defines the list of Telegram configurations. + // +listType=atomic + // +optional + TelegramConfigs []TelegramConfig `json:"telegramConfigs,omitempty"` + // webexConfigs defines the list of Webex configurations. + // +listType=atomic + // +optional + WebexConfigs []WebexConfig `json:"webexConfigs,omitempty"` + // msteamsConfigs defines the list of MSTeams configurations. + // It requires Alertmanager >= 0.26.0. 
+ // +listType=atomic + // +optional + MSTeamsConfigs []MSTeamsConfig `json:"msteamsConfigs,omitempty"` + // msteamsv2Configs defines the list of MSTeamsV2 configurations. + // It requires Alertmanager >= 0.28.0. + // +listType=atomic + // +optional + MSTeamsV2Configs []MSTeamsV2Config `json:"msteamsv2Configs,omitempty"` + // rocketchatConfigs defines the list of RocketChat configurations. + // It requires Alertmanager >= 0.28.0. + // +listType=atomic + // +optional + RocketChatConfigs []RocketChatConfig `json:"rocketchatConfigs,omitempty"` +} + +// PagerDutyConfig configures notifications via PagerDuty. +// See https://prometheus.io/docs/alerting/latest/configuration/#pagerduty_config +type PagerDutyConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // routingKey defines the secret's key that contains the PagerDuty integration key (when using + // Events API v2). Either this field or `serviceKey` needs to be defined. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + RoutingKey *v1.SecretKeySelector `json:"routingKey,omitempty"` + // serviceKey defines the secret's key that contains the PagerDuty service key (when using + // integration type "Prometheus"). Either this field or `routingKey` needs to + // be defined. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + ServiceKey *v1.SecretKeySelector `json:"serviceKey,omitempty"` + // url defines the URL to send requests to. + // +optional + URL *URL `json:"url,omitempty"` + // client defines the client identification. + // +kubebuilder:validation:MinLength=1 + // +optional + Client *string `json:"client,omitempty"` + // clientURL defines the backlink to the sender of notification. + // +optional + ClientURL *string `json:"clientURL,omitempty"` + // description of the incident. + // +kubebuilder:validation:MinLength=1 + // +optional + Description *string `json:"description,omitempty"` + // severity of the incident. + // +kubebuilder:validation:MinLength=1 + // +optional + Severity *string `json:"severity,omitempty"` + // class defines the class/type of the event. + // +kubebuilder:validation:MinLength=1 + // +optional + Class *string `json:"class,omitempty"` + // group defines a cluster or grouping of sources. + // +kubebuilder:validation:MinLength=1 + // +optional + Group *string `json:"group,omitempty"` + // component defines the part or component of the affected system that is broken. + // +kubebuilder:validation:MinLength=1 + // +optional + Component *string `json:"component,omitempty"` + // details defines the arbitrary key/value pairs that provide further detail about the incident. + // +listType=atomic + // +optional + Details []KeyValue `json:"details,omitempty"` + // pagerDutyImageConfigs defines a list of image details to attach that provide further detail about an incident. + // +listType=atomic + // +optional + PagerDutyImageConfigs []PagerDutyImageConfig `json:"pagerDutyImageConfigs,omitempty"` + // pagerDutyLinkConfigs defines a list of link details to attach that provide further detail about an incident. + // +listType=atomic + // +optional + PagerDutyLinkConfigs []PagerDutyLinkConfig `json:"pagerDutyLinkConfigs,omitempty"` + // httpConfig defines the HTTP client configuration. 
+ // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` + // source defines the unique location of the affected system. + // +kubebuilder:validation:MinLength=1 + // +optional + Source *string `json:"source,omitempty"` + // timeout is the maximum time allowed to invoke the pagerduty + // It requires Alertmanager >= v0.30.0. + // +optional + Timeout *monitoringv1.Duration `json:"timeout,omitempty"` +} + +// PagerDutyImageConfig attaches images to an incident +type PagerDutyImageConfig struct { + // src of the image being attached to the incident + // +kubebuilder:validation:MinLength=1 + // +optional + Src *string `json:"src,omitempty"` + // href defines the optional URL; makes the image a clickable link. + // +optional + Href *string `json:"href,omitempty"` + // alt is the optional alternative text for the image. + // +kubebuilder:validation:MinLength=1 + // +optional + Alt *string `json:"alt,omitempty"` +} + +// PagerDutyLinkConfig attaches text links to an incident +type PagerDutyLinkConfig struct { + // href defines the URL of the link to be attached + // +optional + Href *string `json:"href,omitempty"` + // alt defines the text that describes the purpose of the link, and can be used as the link's text. + // +kubebuilder:validation:MinLength=1 + // +optional + Text *string `json:"alt,omitempty"` +} + +// DiscordConfig configures notifications via Discord. +// See https://prometheus.io/docs/alerting/latest/configuration/#discord_config +type DiscordConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiURL defines the secret's key that contains the Discord webhook URL. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +required + APIURL v1.SecretKeySelector `json:"apiURL"` + // title defines the template of the message's title. + // +optional + Title *string `json:"title,omitempty"` + // message defines the template of the message's body. + // +optional + Message *string `json:"message,omitempty"` + // content defines the template of the content's body. + // +optional + // +kubebuilder:validation:MinLength=1 + Content *string `json:"content,omitempty"` + // username defines the username of the message sender. + // +optional + // +kubebuilder:validation:MinLength=1 + Username *string `json:"username,omitempty"` + // avatarURL defines the avatar url of the message sender. + // +optional + AvatarURL *URL `json:"avatarURL,omitempty"` + // httpConfig defines the HTTP client configuration. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// SlackConfig configures notifications via Slack. +// See https://prometheus.io/docs/alerting/latest/configuration/#slack_config +type SlackConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiURL defines the secret's key that contains the Slack webhook URL. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + APIURL *v1.SecretKeySelector `json:"apiURL,omitempty"` + // channel defines the channel or user to send notifications to. + // +kubebuilder:validation:MinLength=1 + // +optional + Channel *string `json:"channel,omitempty"` + // username defines the slack bot user name. 
+ // +kubebuilder:validation:MinLength=1 + // +optional + Username *string `json:"username,omitempty"` + // color defines the color of the left border of the Slack message attachment. + // Can be a hex color code (e.g., "#ff0000") or a predefined color name. + // +kubebuilder:validation:MinLength=1 + // +optional + Color *string `json:"color,omitempty"` + // title defines the title text displayed in the Slack message attachment. + // +kubebuilder:validation:MinLength=1 + // +optional + Title *string `json:"title,omitempty"` + // titleLink defines the URL that the title will link to when clicked. + // +optional + TitleLink string `json:"titleLink,omitempty"` + // pretext defines optional text that appears above the message attachment block. + // +kubebuilder:validation:MinLength=1 + // +optional + Pretext *string `json:"pretext,omitempty"` + // text defines the main text content of the Slack message attachment. + // +kubebuilder:validation:MinLength=1 + // +optional + Text *string `json:"text,omitempty"` + // fields defines a list of Slack fields that are sent with each notification. + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +optional + Fields []SlackField `json:"fields,omitempty"` + // shortFields determines whether fields are displayed in a compact format. + // When true, fields are shown side by side when possible. + // +optional + ShortFields *bool `json:"shortFields,omitempty"` // nolint:kubeapilinter + // footer defines small text displayed at the bottom of the message attachment. + // +kubebuilder:validation:MinLength=1 + // +optional + Footer *string `json:"footer,omitempty"` + // fallback defines a plain-text summary of the attachment for clients that don't support attachments. + // +kubebuilder:validation:MinLength=1 + // +optional + Fallback *string `json:"fallback,omitempty"` + // callbackId defines an identifier for the message used in interactive components. + // +kubebuilder:validation:MinLength=1 + // +optional + CallbackID *string `json:"callbackId,omitempty"` + // iconEmoji defines the emoji to use as the bot's avatar (e.g., ":ghost:"). + // +kubebuilder:validation:MinLength=1 + // +optional + IconEmoji *string `json:"iconEmoji,omitempty"` + // iconURL defines the URL to an image to use as the bot's avatar. + // +optional + IconURL string `json:"iconURL,omitempty"` + // imageURL defines the URL to an image file that will be displayed inside the message attachment. + // +optional + ImageURL string `json:"imageURL,omitempty"` + // thumbURL defines the URL to an image file that will be displayed as a thumbnail + // on the right side of the message attachment. + // +optional + ThumbURL string `json:"thumbURL,omitempty"` + // linkNames enables automatic linking of channel names and usernames in the message. + // When true, @channel and @username will be converted to clickable links. + // +optional + LinkNames *bool `json:"linkNames,omitempty"` // nolint:kubeapilinter + // mrkdwnIn defines which fields should be parsed as Slack markdown. + // Valid values include "pretext", "text", and "fields". + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:items:MinLength=1 + // +optional + MrkdwnIn []string `json:"mrkdwnIn,omitempty"` + // actions defines a list of Slack actions that are sent with each notification. + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +optional + Actions []SlackAction `json:"actions,omitempty"` + // httpConfig defines the HTTP client configuration. 
+ // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` + // timeout defines the maximum time to wait for a webhook request to complete, + // before failing the request and allowing it to be retried. + // It requires Alertmanager >= v0.30.0. + // +optional + Timeout *monitoringv1.Duration `json:"timeout,omitempty"` +} + +// Validate ensures SlackConfig is valid. +func (sc *SlackConfig) Validate() error { + for _, action := range sc.Actions { + if err := action.Validate(); err != nil { + return err + } + } + + for _, field := range sc.Fields { + if err := field.Validate(); err != nil { + return err + } + } + + return nil +} + +// SlackAction configures a single Slack action that is sent with each +// notification. +// See https://api.slack.com/docs/message-attachments#action_fields and +// https://api.slack.com/docs/message-buttons for more information. +type SlackAction struct { + // type defines the type of interactive component. + // Common values include "button" for clickable buttons and "select" for dropdown menus. + // +kubebuilder:validation:MinLength=1 + // +required + Type string `json:"type"` + // text defines the user-visible label displayed on the action element. + // For buttons, this is the button text. For select menus, this is the placeholder text. + // +kubebuilder:validation:MinLength=1 + // +required + Text string `json:"text"` + // url defines the URL to open when the action is triggered. + // Only applicable for button-type actions. When set, clicking the button opens this URL. + // +optional + URL string `json:"url,omitempty"` + // style defines the visual appearance of the action element. + // Valid values include "default", "primary" (green), and "danger" (red). + // +kubebuilder:validation:MinLength=1 + // +optional + Style *string `json:"style,omitempty"` + // name defines a unique identifier for the action within the message. + // This value is sent back to your application when the action is triggered. + // +kubebuilder:validation:MinLength=1 + // +optional + Name *string `json:"name,omitempty"` + // value defines the payload sent when the action is triggered. + // This data is included in the callback sent to your application. + // +kubebuilder:validation:MinLength=1 + // +optional + Value *string `json:"value,omitempty"` + // confirm defines an optional confirmation dialog that appears before the action is executed. + // When set, users must confirm their intent before the action proceeds. + // +optional + ConfirmField *SlackConfirmationField `json:"confirm,omitempty"` +} + +// Validate ensures SlackAction is valid. +func (sa *SlackAction) Validate() error { + if sa.Type == "" { + return errors.New("missing type in Slack action configuration") + } + + if sa.Text == "" { + return errors.New("missing text in Slack action configuration") + } + + if sa.URL == "" && ptr.Deref(sa.Name, "") == "" { + return errors.New("missing name or url in Slack action configuration") + } + + if sa.ConfirmField != nil { + if err := sa.ConfirmField.Validate(); err != nil { + return err + } + } + + return nil +} + +// SlackConfirmationField protect users from destructive actions or +// particularly distinguished decisions by asking them to confirm their button +// click one more time. +// See https://api.slack.com/docs/interactive-message-field-guide#confirmation_fields +// for more information. +type SlackConfirmationField struct { + // text defines the main message displayed in the confirmation dialog. 
+ // This should be a clear question or statement asking the user to confirm their action. + // +kubebuilder:validation:MinLength=1 + // +required + Text string `json:"text"` + // title defines the title text displayed at the top of the confirmation dialog. + // When not specified, a default title will be used. + // +kubebuilder:validation:MinLength=1 + // +optional + Title *string `json:"title,omitempty"` + // okText defines the label for the confirmation button in the dialog. + // When not specified, defaults to "Okay". This button proceeds with the action. + // +kubebuilder:validation:MinLength=1 + // +optional + OkText *string `json:"okText,omitempty"` + // dismissText defines the label for the cancel button in the dialog. + // When not specified, defaults to "Cancel". This button cancels the action. + // +kubebuilder:validation:MinLength=1 + // +optional + DismissText *string `json:"dismissText,omitempty"` +} + +// Validate ensures SlackConfirmationField is valid. +func (scf *SlackConfirmationField) Validate() error { + if scf.Text == "" { + return errors.New("missing text in Slack confirmation configuration") + } + return nil +} + +// SlackField configures a single Slack field that is sent with each notification. +// Each field must contain a title, value, and optionally, a boolean value to indicate if the field +// is short enough to be displayed next to other fields designated as short. +// See https://api.slack.com/docs/message-attachments#fields for more information. +type SlackField struct { + // title defines the label or header text displayed for this field. + // This appears as bold text above the field value in the Slack message. + // +kubebuilder:validation:MinLength=1 + // +required + Title string `json:"title"` + // value defines the content or data displayed for this field. + // This appears below the title and can contain plain text or Slack markdown. + // +kubebuilder:validation:MinLength=1 + // +required + Value string `json:"value"` + // short determines whether this field can be displayed alongside other short fields. + // When true, Slack may display this field side by side with other short fields. + // When false or not specified, the field takes the full width of the message. + // +optional + Short *bool `json:"short,omitempty"` // nolint:kubeapilinter +} + +// Validate ensures SlackField is valid +func (sf *SlackField) Validate() error { + if sf.Title == "" { + return errors.New("missing title in Slack field configuration") + } + + if sf.Value == "" { + return errors.New("missing value in Slack field configuration") + } + + return nil +} + +// WebhookConfig configures notifications via a generic receiver supporting the webhook payload. +// See https://prometheus.io/docs/alerting/latest/configuration/#webhook_config +type WebhookConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // url defines the URL to send HTTP POST requests to. + // urlSecret takes precedence over url. One of urlSecret and url should be defined. + // +optional + URL *string `json:"url,omitempty"` + // urlSecret defines the secret's key that contains the webhook URL to send HTTP requests to. + // urlSecret takes precedence over url. One of urlSecret and url should be defined. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. 
+ // +optional + URLSecret *v1.SecretKeySelector `json:"urlSecret,omitempty"` + // httpConfig defines the HTTP client configuration for webhook requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` + // maxAlerts defines the maximum number of alerts to be sent per webhook message. + // When 0, all alerts are included in the webhook payload. + // +optional + // +kubebuilder:validation:Minimum=0 + MaxAlerts int32 `json:"maxAlerts,omitempty"` + // timeout defines the maximum time to wait for a webhook request to complete, + // before failing the request and allowing it to be retried. + // It requires Alertmanager >= v0.28.0. + // +optional + Timeout *monitoringv1.Duration `json:"timeout,omitempty"` +} + +// OpsGenieConfig configures notifications via OpsGenie. +// See https://prometheus.io/docs/alerting/latest/configuration/#opsgenie_config +type OpsGenieConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiKey defines the secret's key that contains the OpsGenie API key. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + APIKey *v1.SecretKeySelector `json:"apiKey,omitempty"` + // apiURL defines the URL to send OpsGenie API requests to. + // When not specified, defaults to the standard OpsGenie API endpoint. + // +optional + APIURL *URL `json:"apiURL,omitempty"` + // message defines the alert text limited to 130 characters. + // This appears as the main alert title in OpsGenie. + // +optional + Message string `json:"message,omitempty"` + // description defines the detailed description of the incident. + // This provides additional context beyond the message field. + // +optional + Description string `json:"description,omitempty"` + // source defines the backlink to the sender of the notification. + // This helps identify where the alert originated from. + // +optional + Source string `json:"source,omitempty"` + // tags defines a comma separated list of tags attached to the notifications. + // These help categorize and filter alerts within OpsGenie. + // +optional + Tags string `json:"tags,omitempty"` + // note defines an additional alert note. + // This provides supplementary information about the alert. + // +optional + Note string `json:"note,omitempty"` + // priority defines the priority level of alert. + // Possible values are P1, P2, P3, P4, and P5, where P1 is highest priority. + // +optional + Priority string `json:"priority,omitempty"` + // updateAlerts defines Whether to update message and description of the alert in OpsGenie if it already exists + // By default, the alert is never updated in OpsGenie, the new message only appears in activity log. + // +optional + UpdateAlerts *bool `json:"updateAlerts,omitempty"` // nolint:kubeapilinter + // details defines a set of arbitrary key/value pairs that provide further detail about the incident. + // These appear as additional fields in the OpsGenie alert. + // +listType=atomic + // +optional + Details []KeyValue `json:"details,omitempty"` + // responders defines the list of responders responsible for notifications. + // These determine who gets notified when the alert is created. + // +listType=atomic + // +optional + Responders []OpsGenieConfigResponder `json:"responders,omitempty"` + // httpConfig defines the HTTP client configuration for OpsGenie API requests. 
+ // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` + // entity defines an optional field that can be used to specify which domain alert is related to. + // This helps group related alerts together in OpsGenie. + // +optional + Entity string `json:"entity,omitempty"` + // actions defines a comma separated list of actions that will be available for the alert. + // These appear as action buttons in the OpsGenie interface. + // +optional + Actions string `json:"actions,omitempty"` +} + +// Validate ensures OpsGenieConfig is valid +func (o *OpsGenieConfig) Validate() error { + for _, responder := range o.Responders { + if err := responder.Validate(); err != nil { + return err + } + } + return nil +} + +// OpsGenieConfigResponder defines a responder to an incident. +// One of `id`, `name` or `username` has to be defined. +type OpsGenieConfigResponder struct { + // id defines the unique identifier of the responder. + // This corresponds to the responder's ID within OpsGenie. + // +optional + ID string `json:"id,omitempty"` + // name defines the display name of the responder. + // This is used when the responder is identified by name rather than ID. + // +optional + Name string `json:"name,omitempty"` + // username defines the username of the responder. + // This is typically used for user-type responders when identifying by username. + // +optional + Username string `json:"username,omitempty"` + // type defines the type of responder. + // Valid values include "user", "team", "schedule", and "escalation". + // This determines how OpsGenie interprets the other identifier fields. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Enum=team;teams;user;escalation;schedule + // +required + Type string `json:"type"` +} + +const opsgenieValidTypesRe = `^(team|teams|user|escalation|schedule)$` + +var opsgenieTypeMatcher = regexp.MustCompile(opsgenieValidTypesRe) + +// Validate ensures OpsGenieConfigResponder is valid. +func (r *OpsGenieConfigResponder) Validate() error { + if r.ID == "" && r.Name == "" && r.Username == "" { + return errors.New("responder must have at least an ID, a Name or an Username defined") + } + + if strings.Contains(r.Type, "{{") { + _, err := template.New("").Parse(r.Type) + if err != nil { + return fmt.Errorf("responder %v type is not a valid template: %w", r, err) + } + return nil + } + + if opsgenieTypeMatcher.MatchString(strings.ToLower(r.Type)) { + return nil + } + return fmt.Errorf("opsGenieConfig responder %v type does not match valid options %s", r, opsgenieValidTypesRe) +} + +// HTTPConfig defines a client HTTP configuration. +// See https://prometheus.io/docs/alerting/latest/configuration/#http_config +type HTTPConfig struct { + // authorization defines the authorization header configuration for the client. + // This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + // +optional + Authorization *monitoringv1.SafeAuthorization `json:"authorization,omitempty"` + // basicAuth defines the basic authentication credentials for the client. + // This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. + // +optional + BasicAuth *monitoringv1.BasicAuth `json:"basicAuth,omitempty"` + // oauth2 defines the OAuth2 client credentials used to fetch a token for the targets. + // This enables OAuth2 authentication flow for HTTP requests. 
+ // +optional + OAuth2 *monitoringv1.OAuth2 `json:"oauth2,omitempty"` + // bearerTokenSecret defines the secret's key that contains the bearer token to be used by the client + // for authentication. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + BearerTokenSecret *v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` + // tlsConfig defines the TLS configuration for the client. + // This includes settings for certificates, CA validation, and TLS protocol options. + // +optional + TLSConfig *monitoringv1.SafeTLSConfig `json:"tlsConfig,omitempty"` + + // proxyURL defines an optional proxy URL for HTTP requests. + // If defined, this field takes precedence over `proxyUrl`. + // + // +optional + ProxyURLOriginal *string `json:"proxyURL,omitempty"` + + monitoringv1.ProxyConfig `json:",inline"` + + // followRedirects specifies whether the client should follow HTTP 3xx redirects. + // When true, the client will automatically follow redirect responses. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + + // enableHttp2 can be used to disable HTTP2. + // + // +optional + EnableHTTP2 *bool `json:"enableHttp2,omitempty"` // nolint:kubeapilinter +} + +// WebexConfig configures notification via Cisco Webex +// See https://prometheus.io/docs/alerting/latest/configuration/#webex_config +type WebexConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + + // apiURL defines the Webex Teams API URL i.e. https://webexapis.com/v1/messages + // +optional + APIURL *URL `json:"apiURL,omitempty"` + + // httpConfig defines the HTTP client's configuration. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` + + // message defines the message template + // +optional + Message *string `json:"message,omitempty"` + + // roomID defines the ID of the Webex Teams room where to send the messages. + // +kubebuilder:validation:MinLength=1 + // +required + RoomID string `json:"roomID"` +} + +// WeChatConfig configures notifications via WeChat. +// See https://prometheus.io/docs/alerting/latest/configuration/#wechat_config +type WeChatConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiSecret defines the secret's key that contains the WeChat API key. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + APISecret *v1.SecretKeySelector `json:"apiSecret,omitempty"` + // apiURL defines the WeChat API URL. + // When not specified, defaults to the standard WeChat Work API endpoint. + // +optional + APIURL *URL `json:"apiURL,omitempty"` + // corpID defines the corp id for authentication. + // This is the unique identifier for your WeChat Work organization. + // +optional + CorpID string `json:"corpID,omitempty"` + // agentID defines the application agent ID within WeChat Work. + // This identifies which WeChat Work application will send the notifications. + // +optional + AgentID string `json:"agentID,omitempty"` + // toUser defines the target user(s) to receive the notification. + // Can be a single user ID or multiple user IDs separated by '|'. 
+ // +optional + ToUser string `json:"toUser,omitempty"` + // toParty defines the target department(s) to receive the notification. + // Can be a single department ID or multiple department IDs separated by '|'. + // +optional + ToParty string `json:"toParty,omitempty"` + // toTag defines the target tag(s) to receive the notification. + // Can be a single tag ID or multiple tag IDs separated by '|'. + // +optional + ToTag string `json:"toTag,omitempty"` + // message defines the API request data as defined by the WeChat API. + // This contains the actual notification content to be sent. + // +optional + Message string `json:"message,omitempty"` + // messageType defines the type of message to send. + // Valid values include "text", "markdown", and other WeChat Work supported message types. + // +optional + MessageType string `json:"messageType,omitempty"` + // httpConfig defines the HTTP client configuration for WeChat API requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// EmailConfig configures notifications via Email. +type EmailConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // to defines the email address to send notifications to. + // This is the recipient address for alert notifications. + // +optional + To string `json:"to,omitempty"` + // from defines the sender address for email notifications. + // This appears as the "From" field in the email header. + // +optional + From string `json:"from,omitempty"` + // hello defines the hostname to identify to the SMTP server. + // This is used in the SMTP HELO/EHLO command during the connection handshake. + // +optional + Hello string `json:"hello,omitempty"` + // smarthost defines the SMTP host and port through which emails are sent. + // Format should be "hostname:port", e.g. "smtp.example.com:587". + // +optional + Smarthost string `json:"smarthost,omitempty"` + // authUsername defines the username to use for SMTP authentication. + // This is used for SMTP AUTH when the server requires authentication. + // +optional + AuthUsername string `json:"authUsername,omitempty"` + // authPassword defines the secret's key that contains the password to use for authentication. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + AuthPassword *v1.SecretKeySelector `json:"authPassword,omitempty"` + // authSecret defines the secret's key that contains the CRAM-MD5 secret. + // This is used for CRAM-MD5 authentication mechanism. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + AuthSecret *v1.SecretKeySelector `json:"authSecret,omitempty"` + // authIdentity defines the identity to use for SMTP authentication. + // This is typically used with PLAIN authentication mechanism. + // +optional + AuthIdentity string `json:"authIdentity,omitempty"` + // headers defines additional email header key/value pairs. + // These override any headers previously set by the notification implementation. + // +listType=atomic + // +optional + Headers []KeyValue `json:"headers,omitempty"` + // html defines the HTML body of the email notification. + // This allows for rich formatting in the email content. 
+ // +optional + HTML *string `json:"html,omitempty"` + // text defines the plain text body of the email notification. + // This provides a fallback for email clients that don't support HTML. + // +optional + Text *string `json:"text,omitempty"` + // requireTLS defines the SMTP TLS requirement. + // Note that Go does not support unencrypted connections to remote SMTP endpoints. + // +optional + RequireTLS *bool `json:"requireTLS,omitempty"` // nolint:kubeapilinter + // tlsConfig defines the TLS configuration for SMTP connections. + // This includes settings for certificates, CA validation, and TLS protocol options. + // +optional + TLSConfig *monitoringv1.SafeTLSConfig `json:"tlsConfig,omitempty"` +} + +// VictorOpsConfig configures notifications via VictorOps. +// See https://prometheus.io/docs/alerting/latest/configuration/#victorops_config +type VictorOpsConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiKey defines the secret's key that contains the API key to use when talking to the VictorOps API. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + APIKey *v1.SecretKeySelector `json:"apiKey,omitempty"` + // apiUrl defines the VictorOps API URL. + // When not specified, defaults to the standard VictorOps API endpoint. + // +optional + APIURL *URL `json:"apiUrl,omitempty"` + // routingKey defines a key used to map the alert to a team. + // This determines which VictorOps team will receive the alert notification. + // +kubebuilder:validation:MinLength=1 + // +required + RoutingKey string `json:"routingKey"` + // messageType describes the behavior of the alert. + // Valid values are "CRITICAL", "WARNING", and "INFO". + // +kubebuilder:validation:MinLength=1 + // +optional + MessageType *string `json:"messageType,omitempty"` + // entityDisplayName contains a summary of the alerted problem. + // This appears as the main title or identifier for the incident. + // +kubebuilder:validation:MinLength=1 + // +optional + EntityDisplayName *string `json:"entityDisplayName,omitempty"` + // stateMessage contains a long explanation of the alerted problem. + // This provides detailed context about the incident. + // +kubebuilder:validation:MinLength=1 + // +optional + StateMessage *string `json:"stateMessage,omitempty"` + // monitoringTool defines the monitoring tool the state message is from. + // This helps identify the source system that generated the alert. + // +kubebuilder:validation:MinLength=1 + // +optional + MonitoringTool *string `json:"monitoringTool,omitempty"` + // customFields defines additional custom fields for notification. + // These provide extra metadata that will be included with the VictorOps incident. + // +listType=atomic + // +optional + CustomFields []KeyValue `json:"customFields,omitempty"` + // httpConfig defines the HTTP client's configuration for VictorOps API requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// PushoverConfig configures notifications via Pushover. +// See https://prometheus.io/docs/alerting/latest/configuration/#pushover_config +type PushoverConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. 
+ // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // userKey defines the secret's key that contains the recipient user's user key. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // Either `userKey` or `userKeyFile` is required. + // +optional + UserKey *v1.SecretKeySelector `json:"userKey,omitempty"` + // userKeyFile defines the user key file that contains the recipient user's user key. + // Either `userKey` or `userKeyFile` is required. + // It requires Alertmanager >= v0.26.0. + // +kubebuilder:validation:MinLength=1 + // +optional + UserKeyFile *string `json:"userKeyFile,omitempty"` + // token defines the secret's key that contains the registered application's API token. + // See https://pushover.net/apps for application registration. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // Either `token` or `tokenFile` is required. + // +optional + Token *v1.SecretKeySelector `json:"token,omitempty"` + // tokenFile defines the token file that contains the registered application's API token. + // See https://pushover.net/apps for application registration. + // Either `token` or `tokenFile` is required. + // It requires Alertmanager >= v0.26.0. + // +kubebuilder:validation:MinLength=1 + // +optional + TokenFile *string `json:"tokenFile,omitempty"` + // title defines the notification title displayed in the Pushover message. + // This appears as the bold header text in the notification. + // +kubebuilder:validation:MinLength=1 + // +optional + Title *string `json:"title,omitempty"` + // message defines the notification message content. + // This is the main body text of the Pushover notification. + // +kubebuilder:validation:MinLength=1 + // +optional + Message *string `json:"message,omitempty"` + // url defines a supplementary URL shown alongside the message. + // This creates a clickable link within the Pushover notification. + // +optional + URL string `json:"url,omitempty"` + // urlTitle defines a title for the supplementary URL. + // If not specified, the raw URL is shown instead. + // +kubebuilder:validation:MinLength=1 + // +optional + URLTitle *string `json:"urlTitle,omitempty"` + // ttl defines the time to live for the alert notification. + // This determines how long the notification remains active before expiring. + // +optional + TTL *monitoringv1.Duration `json:"ttl,omitempty"` + // device defines the name of a specific device to send the notification to. + // If not specified, the notification is sent to all user's devices. + // +kubebuilder:validation:MinLength=1 + // +optional + Device *string `json:"device,omitempty"` + // sound defines the name of one of the sounds supported by device clients. + // This overrides the user's default sound choice for this notification. + // +kubebuilder:validation:MinLength=1 + // +optional + Sound *string `json:"sound,omitempty"` + // priority defines the notification priority level. + // See https://pushover.net/api#priority for valid values and behavior. + // +kubebuilder:validation:MinLength=1 + // +optional + Priority *string `json:"priority,omitempty"` + // retry defines how often the Pushover servers will send the same notification to the user. + // Must be at least 30 seconds. Only applies to priority 2 notifications. 
+ // +kubebuilder:validation:Pattern=`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` + // +optional + Retry *string `json:"retry,omitempty"` + // expire defines how long your notification will continue to be retried for, + // unless the user acknowledges the notification. Only applies to priority 2 notifications. + // +kubebuilder:validation:Pattern=`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` + // +optional + Expire *string `json:"expire,omitempty"` + // html defines whether notification message is HTML or plain text. + // When true, the message can include HTML formatting tags. + // html and monospace formatting are mutually exclusive. + // +optional + HTML *bool `json:"html,omitempty"` //nolint:kubeapilinter + // monospace optional HTML/monospace formatting for the message, see https://pushover.net/api#html + // html and monospace formatting are mutually exclusive. + // +optional + Monospace *bool `json:"monospace,omitempty"` //nolint:kubeapilinter + // httpConfig defines the HTTP client configuration for Pushover API requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// SNSConfig configures notifications via AWS SNS. +// See https://prometheus.io/docs/alerting/latest/configuration/#sns_configs +type SNSConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiURL defines the SNS API URL, e.g. https://sns.us-east-2.amazonaws.com. + // If not specified, the SNS API URL from the SNS SDK will be used. + // +optional + ApiURL string `json:"apiURL,omitempty"` + // sigv4 configures AWS's Signature Verification 4 signing process to sign requests. + // This includes AWS credentials and region configuration for authentication. + // +optional + Sigv4 *monitoringv1.Sigv4 `json:"sigv4,omitempty"` + // topicARN defines the SNS topic ARN, e.g. arn:aws:sns:us-east-2:698519295917:My-Topic. + // If you don't specify this value, you must specify a value for the PhoneNumber or TargetARN. + // +optional + TopicARN string `json:"topicARN,omitempty"` + // subject defines the subject line when the message is delivered to email endpoints. + // This field is only used when sending to email subscribers of an SNS topic. + // +optional + Subject string `json:"subject,omitempty"` + // phoneNumber defines the phone number if message is delivered via SMS in E.164 format. + // If you don't specify this value, you must specify a value for the TopicARN or TargetARN. + // +optional + PhoneNumber string `json:"phoneNumber,omitempty"` + // targetARN defines the mobile platform endpoint ARN if message is delivered via mobile notifications. + // If you don't specify this value, you must specify a value for the TopicARN or PhoneNumber. + // +optional + TargetARN string `json:"targetARN,omitempty"` + // message defines the message content of the SNS notification. + // This is the actual notification text that will be sent to subscribers. + // +optional + Message string `json:"message,omitempty"` + // attributes defines SNS message attributes as key-value pairs. + // These provide additional metadata that can be used for message filtering and routing. + // +optional + //nolint:kubeapilinter + Attributes map[string]string `json:"attributes,omitempty"` + // httpConfig defines the HTTP client configuration for SNS API requests. 
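Illustrative aside (not part of the vendored file): a PushoverConfig sketch that satisfies the `userKey`/`token` requirement via Secret references. The Secret name and keys are placeholders, and the retry/expire values follow the constraints described above.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
)

func main() {
	priority := "2"
	retry := "30s"
	expire := "1h"
	push := v1alpha1.PushoverConfig{
		UserKey: &corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: "pushover"},
			Key:                  "user-key",
		},
		Token: &corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: "pushover"},
			Key:                  "token",
		},
		// retry and expire only take effect for priority 2 (emergency) notifications.
		Priority: &priority,
		Retry:    &retry,
		Expire:   &expire,
	}
	fmt.Printf("%+v\n", push)
}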
+ // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// TelegramConfig configures notifications via Telegram. +// See https://prometheus.io/docs/alerting/latest/configuration/#telegram_config +type TelegramConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiURL defines the Telegram API URL, e.g. https://api.telegram.org. + // If not specified, the default Telegram API URL will be used. + // +optional + APIURL *URL `json:"apiURL,omitempty"` + // botToken defines the Telegram bot token. It is mutually exclusive with `botTokenFile`. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // Either `botToken` or `botTokenFile` is required. + // +optional + BotToken *v1.SecretKeySelector `json:"botToken,omitempty"` + // botTokenFile defines the file to read the Telegram bot token from. + // It is mutually exclusive with `botToken`. + // Either `botToken` or `botTokenFile` is required. + // It requires Alertmanager >= v0.26.0. + // +optional + BotTokenFile *string `json:"botTokenFile,omitempty"` + // chatID defines the Telegram chat ID where messages will be sent. + // This can be a user ID, group ID, or channel ID (with @ prefix for public channels). + // +required + ChatID int64 `json:"chatID,omitempty"` + // messageThreadID defines the Telegram Group Topic ID for threaded messages. + // This allows sending messages to specific topics within Telegram groups. + // It requires Alertmanager >= 0.26.0. + // +optional + MessageThreadID *int64 `json:"messageThreadID,omitempty"` + // message defines the message template for the Telegram notification. + // This is the content that will be sent to the specified chat. + // +optional + Message string `json:"message,omitempty"` + // disableNotifications controls whether Telegram notifications are sent silently. + // When true, users will receive the message without notification sounds. + // +optional + DisableNotifications *bool `json:"disableNotifications,omitempty"` // nolint:kubeapilinter + // parseMode defines the parse mode for telegram message formatting. + // Valid values are "MarkdownV2", "Markdown", and "HTML". + // This determines how text formatting is interpreted in the message. + //+kubebuilder:validation:Enum=MarkdownV2;Markdown;HTML + // +optional + ParseMode string `json:"parseMode,omitempty"` + // httpConfig defines the HTTP client configuration for Telegram API requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// MSTeamsConfig configures notifications via Microsoft Teams. +// It requires Alertmanager >= 0.26.0. +type MSTeamsConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // webhookUrl defines the MSTeams webhook URL for sending notifications. + // This is the incoming webhook URL configured in your Teams channel. + // +required + WebhookURL v1.SecretKeySelector `json:"webhookUrl"` + // title defines the message title template for Teams notifications. + // This appears as the main heading of the Teams message card. + // +optional + Title *string `json:"title,omitempty"` + // summary defines the message summary template for Teams notifications. + // This provides a brief overview that appears in Teams notification previews. 
+ // It requires Alertmanager >= 0.27.0. + // +optional + Summary *string `json:"summary,omitempty"` + // text defines the message body template for Teams notifications. + // This contains the detailed content of the Teams message. + // +optional + Text *string `json:"text,omitempty"` + // httpConfig defines the HTTP client configuration for Teams webhook requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// MSTeamsV2Config configures notifications via Microsoft Teams using the new message format with adaptive cards as required by flows. +// See https://prometheus.io/docs/alerting/latest/configuration/#msteamsv2_config +// It requires Alertmanager >= 0.28.0. +type MSTeamsV2Config struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // webhookURL defines the MSTeams incoming webhook URL for adaptive card notifications. + // This webhook must support the newer adaptive cards format required by Teams flows. + // +optional + WebhookURL *v1.SecretKeySelector `json:"webhookURL,omitempty"` + // title defines the message title template for adaptive card notifications. + // This appears as the main heading in the Teams adaptive card. + // +kubebuilder:validation:MinLength=1 + // +optional + Title *string `json:"title,omitempty"` + // text defines the message body template for adaptive card notifications. + // This contains the detailed content displayed in the Teams adaptive card format. + // +kubebuilder:validation:MinLength=1 + // +optional + Text *string `json:"text,omitempty"` + // httpConfig defines the HTTP client configuration for Teams webhook requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// RocketChatConfig configures notifications via RocketChat. +// It requires Alertmanager >= 0.28.0. +type RocketChatConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiURL defines the API URL for RocketChat. + // Defaults to https://open.rocket.chat/ if not specified. + // +optional + APIURL *URL `json:"apiURL,omitempty"` + // channel defines the channel to send alerts to. + // This can be a channel name (e.g., "#alerts") or a direct message recipient. + // +kubebuilder:validation:MinLength=1 + // +optional + Channel *string `json:"channel,omitempty"` + // token defines the sender token for RocketChat authentication. + // This is the personal access token or bot token used to authenticate API requests. + // +required + Token v1.SecretKeySelector `json:"token,omitempty"` + // tokenID defines the sender token ID for RocketChat authentication. + // This is the user ID associated with the token used for API requests. + // +required + TokenID v1.SecretKeySelector `json:"tokenID,omitempty"` + // color defines the message color displayed in RocketChat. + // This appears as a colored bar alongside the message. + // +kubebuilder:validation:MinLength=1 + // +optional + Color *string `json:"color,omitempty"` + // emoji defines the emoji to be displayed as an avatar. + // If provided, this emoji will be used instead of the default avatar or iconURL. + // +kubebuilder:validation:MinLength=1 + // +optional + Emoji *string `json:"emoji,omitempty"` + // iconURL defines the icon URL for the message avatar. + // This displays a custom image as the message sender's avatar. 
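Purely illustrative (not part of the upstream file): a minimal MSTeamsV2Config sketch. The webhook Secret name and the template strings are placeholders, and the receiver assumes Alertmanager >= 0.28.0 as noted above.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
)

func main() {
	title := "Alert: {{ .CommonLabels.alertname }}"
	text := "{{ .CommonAnnotations.summary }}"
	teams := v1alpha1.MSTeamsV2Config{
		// webhookURL references a Secret key holding the adaptive-card webhook URL.
		WebhookURL: &corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: "teams-webhook"},
			Key:                  "url",
		},
		Title: &title,
		Text:  &text,
	}
	fmt.Printf("%+v\n", teams)
}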
+ // +optional + IconURL *string `json:"iconURL,omitempty"` + // text defines the message text to send. + // This is optional because attachments can be used instead of or alongside text. + // +kubebuilder:validation:MinLength=1 + // +optional + Text *string `json:"text,omitempty"` + // title defines the message title displayed prominently in the message. + // This appears as bold text at the top of the message attachment. + // +kubebuilder:validation:MinLength=1 + // +optional + Title *string `json:"title,omitempty"` + // titleLink defines the URL that the title will link to when clicked. + // This makes the message title clickable in the RocketChat interface. + // +kubebuilder:validation:MinLength=1 + // +optional + TitleLink *string `json:"titleLink,omitempty"` + // fields defines additional fields for the message attachment. + // These appear as structured key-value pairs within the message. + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +optional + Fields []RocketChatFieldConfig `json:"fields,omitempty"` + // shortFields defines whether to use short fields in the message layout. + // When true, fields may be displayed side by side to save space. + // +optional + ShortFields *bool `json:"shortFields,omitempty"` // nolint:kubeapilinter + // imageURL defines the image URL to display within the message. + // This embeds an image directly in the message attachment. + // +optional + ImageURL *string `json:"imageURL,omitempty"` + // thumbURL defines the thumbnail URL for the message. + // This displays a small thumbnail image alongside the message content. + // +optional + ThumbURL *string `json:"thumbURL,omitempty"` + // linkNames defines whether to enable automatic linking of usernames and channels. + // When true, @username and #channel references become clickable links. + // +optional + LinkNames *bool `json:"linkNames,omitempty"` // nolint:kubeapilinter + // actions defines interactive actions to include in the message. + // These appear as buttons that users can click to trigger responses. + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +optional + Actions []RocketChatActionConfig `json:"actions,omitempty"` + // httpConfig defines the HTTP client configuration for RocketChat API requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// RocketChatFieldConfig defines additional fields for RocketChat messages. +type RocketChatFieldConfig struct { + // title defines the title of this field. + // This appears as bold text labeling the field content. + // +kubebuilder:validation:MinLength=1 + // +optional + Title *string `json:"title,omitempty"` + // value defines the value of this field, displayed underneath the title. + // This contains the actual data or content for the field. + // +kubebuilder:validation:MinLength=1 + // +optional + Value *string `json:"value,omitempty"` + // short defines whether this field should be a short field. + // When true, the field may be displayed inline with other short fields to save space. + // +optional + Short *bool `json:"short,omitempty"` // nolint:kubeapilinter +} + +// RocketChatActionConfig defines actions for RocketChat messages. +type RocketChatActionConfig struct { + // text defines the button text displayed to users. + // This is the label that appears on the interactive button. + // +kubebuilder:validation:MinLength=1 + // +optional + Text *string `json:"text,omitempty"` + // url defines the URL the button links to when clicked. 
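Again for illustration only: a RocketChatConfig sketch showing the two required Secret references (`token` and `tokenID`) plus one attachment field. All names are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
)

func main() {
	channel := "#alerts"
	fieldTitle := "severity"
	fieldValue := "critical"
	short := true
	rc := v1alpha1.RocketChatConfig{
		Channel: &channel,
		// token and tokenID are both required and are plain (non-pointer) selectors.
		Token: corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: "rocketchat"},
			Key:                  "token",
		},
		TokenID: corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: "rocketchat"},
			Key:                  "token-id",
		},
		Fields: []v1alpha1.RocketChatFieldConfig{
			{Title: &fieldTitle, Value: &fieldValue, Short: &short},
		},
	}
	fmt.Printf("%+v\n", rc)
}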
+ // This creates a clickable button that opens the specified URL. + // +optional + URL *string `json:"url,omitempty"` + // msg defines the message to send when the button is clicked. + // This allows the button to post a predefined message to the channel. + // +kubebuilder:validation:MinLength=1 + // +optional + Msg *string `json:"msg,omitempty"` +} + +// InhibitRule defines an inhibition rule that allows to mute alerts when other +// alerts are already firing. +// See https://prometheus.io/docs/alerting/latest/configuration/#inhibit_rule +type InhibitRule struct { + // targetMatch defines matchers that have to be fulfilled in the alerts to be muted. + // The operator enforces that the alert matches the resource's namespace. + // When these conditions are met, matching alerts will be inhibited (silenced). + // +listType=atomic + // +optional + TargetMatch []Matcher `json:"targetMatch,omitempty"` + // sourceMatch defines matchers for which one or more alerts have to exist for the inhibition + // to take effect. The operator enforces that the alert matches the resource's namespace. + // These are the "trigger" alerts that cause other alerts to be inhibited. + // +listType=atomic + // +optional + SourceMatch []Matcher `json:"sourceMatch,omitempty"` + // equal defines labels that must have an equal value in the source and target alert + // for the inhibition to take effect. This ensures related alerts are properly grouped. + // +listType=atomic + // +optional + Equal []string `json:"equal,omitempty"` +} + +// KeyValue defines a (key, value) tuple. +type KeyValue struct { + // key defines the key of the tuple. + // This is the identifier or name part of the key-value pair. + // +kubebuilder:validation:MinLength=1 + // +required + Key string `json:"key"` + // value defines the value of the tuple. + // This is the data or content associated with the key. + // +required + Value string `json:"value"` +} + +// Matcher defines how to match on alert's labels. +type Matcher struct { + // name defines the label to match. + // This specifies which alert label should be evaluated. + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + // value defines the label value to match. + // This is the expected value for the specified label. + // +optional + Value string `json:"value"` + // matchType defines the match operation available with AlertManager >= v0.22.0. + // Takes precedence over Regex (deprecated) if non-empty. + // Valid values: "=" (equality), "!=" (inequality), "=~" (regex match), "!~" (regex non-match). + // +kubebuilder:validation:Enum=!=;=;=~;!~ + // +optional + MatchType MatchType `json:"matchType,omitempty"` + // regex defines whether to match on equality (false) or regular-expression (true). + // Deprecated: for AlertManager >= v0.22.0, `matchType` should be used instead. 
+ // +optional + Regex bool `json:"regex,omitempty"` // nolint:kubeapilinter +} + +// String returns Matcher as a string +// Use only for MatchType Matcher +func (in Matcher) String() string { + return fmt.Sprintf(`%s%s"%s"`, in.Name, in.MatchType, openMetricsEscape(in.Value)) +} + +// Validate the Matcher returns an error if the matcher is invalid +// Validates only non-deprecated matching fields +func (in Matcher) Validate() error { + // nothing to do + if in.MatchType == "" { + return nil + } + + if !in.MatchType.Valid() { + return fmt.Errorf("invalid 'matchType' '%s' provided'", in.MatchType) + } + + if strings.TrimSpace(in.Name) == "" { + return errors.New("matcher 'name' is required") + } + + return nil +} + +// MatchType is a comparison operator on a Matcher +type MatchType string + +// Valid MatchType returns true if the operator is acceptable +func (mt MatchType) Valid() bool { + _, ok := validMatchTypes[mt] + return ok +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *AlertmanagerConfig) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *AlertmanagerConfigList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +const ( + MatchEqual MatchType = "=" + MatchNotEqual MatchType = "!=" + MatchRegexp MatchType = "=~" + MatchNotRegexp MatchType = "!~" +) + +var validMatchTypes = map[MatchType]bool{ + MatchEqual: true, + MatchNotEqual: true, + MatchRegexp: true, + MatchNotRegexp: true, +} + +// openMetricsEscape is similar to the usual string escaping, but more +// restricted. It merely replaces a new-line character with '\n', a double-quote +// character with '\"', and a backslash with '\\', which is the escaping used by +// OpenMetrics. +// * Copied from alertmanager codebase pkg/labels * +func openMetricsEscape(s string) string { + r := strings.NewReplacer( + `\`, `\\`, + "\n", `\n`, + `"`, `\"`, + ) + return r.Replace(s) +} + +// MuteTimeInterval specifies the periods in time when notifications will be muted +type MuteTimeInterval struct { + // name of the time interval + // +required + Name string `json:"name,omitempty"` + // timeIntervals defines a list of TimeInterval + // +listType=atomic + // +optional + TimeIntervals []TimeInterval `json:"timeIntervals,omitempty"` +} + +// TimeInterval describes intervals of time +type TimeInterval struct { + // times defines a list of TimeRange + // +listType=atomic + // +optional + Times []TimeRange `json:"times,omitempty"` + // weekdays defines a list of WeekdayRange + // +listType=atomic + // +optional + Weekdays []WeekdayRange `json:"weekdays,omitempty"` + // daysOfMonth defines a list of DayOfMonthRange + // +listType=atomic + // +optional + DaysOfMonth []DayOfMonthRange `json:"daysOfMonth,omitempty"` + // months defines a list of MonthRange + // +listType=atomic + // +optional + Months []MonthRange `json:"months,omitempty"` + // years defines a list of YearRange + // +listType=atomic + // +optional + Years []YearRange `json:"years,omitempty"` +} + +// Time defines a time in 24hr format +// +kubebuilder:validation:Pattern=`^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$)` +type Time string + +// TimeRange defines a start and end time in 24hr format +type TimeRange struct { + // startTime defines the start time in 24hr format. + // +optional + StartTime Time `json:"startTime,omitempty"` + // endTime defines the end time in 24hr format. 
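A small, non-authoritative sketch of how the Matcher helpers above compose: build matchers with an explicit matchType, validate them, render them with String(), and wire them into an InhibitRule. The label names and values are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
)

func main() {
	source := v1alpha1.Matcher{Name: "alertname", Value: "NodeDown", MatchType: v1alpha1.MatchEqual}
	target := v1alpha1.Matcher{Name: "severity", Value: "warning|info", MatchType: v1alpha1.MatchRegexp}

	for _, m := range []v1alpha1.Matcher{source, target} {
		if err := m.Validate(); err != nil {
			log.Fatalf("invalid matcher: %v", err)
		}
		// String() renders e.g. alertname="NodeDown", using OpenMetrics escaping for the value.
		fmt.Println(m.String())
	}

	rule := v1alpha1.InhibitRule{
		SourceMatch: []v1alpha1.Matcher{source},
		TargetMatch: []v1alpha1.Matcher{target},
		// Inhibition only applies when source and target alerts agree on these labels.
		Equal: []string{"node"},
	}
	fmt.Printf("%+v\n", rule)
}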
+ // +optional + EndTime Time `json:"endTime,omitempty"` +} + +// WeekdayRange is an inclusive range of days of the week beginning on Sunday +// Days can be specified by name (e.g 'Sunday') or as an inclusive range (e.g 'Monday:Friday') +// +kubebuilder:validation:Pattern=`^((?i)sun|mon|tues|wednes|thurs|fri|satur)day(?:((:(sun|mon|tues|wednes|thurs|fri|satur)day)$)|$)` +type WeekdayRange string + +// DayOfMonthRange is an inclusive range of days of the month beginning at 1 +type DayOfMonthRange struct { + // start of the inclusive range + // +kubebuilder:validation:Minimum=-31 + // +kubebuilder:validation:Maximum=31 + // +optional + Start int `json:"start,omitempty"` + // end of the inclusive range + // +kubebuilder:validation:Minimum=-31 + // +kubebuilder:validation:Maximum=31 + // +optional + End int `json:"end,omitempty"` +} + +// MonthRange is an inclusive range of months of the year beginning in January +// Months can be specified by name (e.g 'January') by numerical month (e.g '1') or as an inclusive range (e.g 'January:March', '1:3', '1:March') +// +kubebuilder:validation:Pattern=`^((?i)january|february|march|april|may|june|july|august|september|october|november|december|1[0-2]|[1-9])(?:((:((?i)january|february|march|april|may|june|july|august|september|october|november|december|1[0-2]|[1-9]))$)|$)` +type MonthRange string + +// YearRange is an inclusive range of years +// +kubebuilder:validation:Pattern=`^2\d{3}(?::2\d{3}|$)` +type YearRange string + +// Weekday is day of the week +type Weekday string + +const ( + Sunday Weekday = "sunday" + Monday Weekday = "monday" + Tuesday Weekday = "tuesday" + Wednesday Weekday = "wednesday" + Thursday Weekday = "thursday" + Friday Weekday = "friday" + Saturday Weekday = "saturday" +) + +var daysOfWeek = map[Weekday]int{ + Sunday: 0, + Monday: 1, + Tuesday: 2, + Wednesday: 3, + Thursday: 4, + Friday: 5, + Saturday: 6, +} + +var daysOfWeekInv = map[int]Weekday{ + 0: Sunday, + 1: Monday, + 2: Tuesday, + 3: Wednesday, + 4: Thursday, + 5: Friday, + 6: Saturday, +} + +// Month of the year +type Month string + +const ( + January Month = "january" + February Month = "february" + March Month = "march" + April Month = "april" + May Month = "may" + June Month = "june" + July Month = "july" + August Month = "august" + September Month = "september" + October Month = "october" + November Month = "november" + December Month = "december" +) + +var months = map[Month]int{ + January: 1, + February: 2, + March: 3, + April: 4, + May: 5, + June: 6, + July: 7, + August: 8, + September: 9, + October: 10, + November: 11, + December: 12, +} + +var monthsInv = map[int]Month{ + 1: January, + 2: February, + 3: March, + 4: April, + 5: May, + 6: June, + 7: July, + 8: August, + 9: September, + 10: October, + 11: November, + 12: December, +} + +// URL represents a valid URL +// +kubebuilder:validation:Pattern=`^https?://.+$` +type URL string diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/doc.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/doc.go new file mode 100644 index 0000000000..aca68fd173 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/doc.go @@ -0,0 +1,18 @@ +// Copyright 2020 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package +// +groupName=monitoring.coreos.com + +package v1alpha1 diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/prometheusagent_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/prometheusagent_types.go new file mode 100644 index 0000000000..e9dfdad695 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/prometheusagent_types.go @@ -0,0 +1,131 @@ +// Copyright 2023 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + PrometheusAgentsKind = "PrometheusAgent" + PrometheusAgentName = "prometheusagents" + PrometheusAgentKindKey = "prometheusagent" +) + +func (l *PrometheusAgent) GetCommonPrometheusFields() monitoringv1.CommonPrometheusFields { + return l.Spec.CommonPrometheusFields +} + +func (l *PrometheusAgent) SetCommonPrometheusFields(f monitoringv1.CommonPrometheusFields) { + l.Spec.CommonPrometheusFields = f +} + +func (l *PrometheusAgent) GetStatus() monitoringv1.PrometheusStatus { + return l.Status +} + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="promagent" +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of Prometheus agent" +// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".spec.replicas",description="The number of desired replicas" +// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.availableReplicas",description="The number of ready replicas" +// +kubebuilder:printcolumn:name="Reconciled",type="string",JSONPath=".status.conditions[?(@.type == 'Reconciled')].status" +// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.conditions[?(@.type == 'Available')].status" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".status.paused",description="Whether the resource reconciliation is paused or not",priority=1 +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.shards,statuspath=.status.shards,selectorpath=.status.selector +// 
+genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale + +// The `PrometheusAgent` custom resource definition (CRD) defines a desired [Prometheus Agent](https://prometheus.io/blog/2021/11/16/agent/) setup to run in a Kubernetes cluster. +// +// The CRD is very similar to the `Prometheus` CRD except for features which aren't available in agent mode like rule evaluation, persistent storage and Thanos sidecar. +type PrometheusAgent struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of the desired behavior of the Prometheus agent. More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +required + Spec PrometheusAgentSpec `json:"spec"` + // status defines the most recent observed status of the Prometheus cluster. Read-only. + // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status monitoringv1.PrometheusStatus `json:"status,omitempty"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *PrometheusAgent) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// PrometheusAgentList is a list of Prometheus agents. +// +k8s:openapi-gen=true +type PrometheusAgentList struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + metav1.ListMeta `json:"metadata,omitempty"` + // List of Prometheus agents + Items []PrometheusAgent `json:"items"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *PrometheusAgentList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// PrometheusAgentSpec is a specification of the desired behavior of the Prometheus agent. 
More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +// +kubebuilder:validation:XValidation:rule="!(has(self.mode) && self.mode == 'DaemonSet' && has(self.replicas))",message="replicas cannot be set when mode is DaemonSet" +// +kubebuilder:validation:XValidation:rule="!(has(self.mode) && self.mode == 'DaemonSet' && has(self.storage))",message="storage cannot be set when mode is DaemonSet" +// +kubebuilder:validation:XValidation:rule="!(has(self.mode) && self.mode == 'DaemonSet' && has(self.shards) && self.shards > 1)",message="shards cannot be greater than 1 when mode is DaemonSet" +// +kubebuilder:validation:XValidation:rule="!(has(self.mode) && self.mode == 'DaemonSet' && has(self.persistentVolumeClaimRetentionPolicy))",message="persistentVolumeClaimRetentionPolicy cannot be set when mode is DaemonSet" +// +kubebuilder:validation:XValidation:rule="!(has(self.mode) && self.mode == 'DaemonSet' && has(self.scrapeConfigSelector))",message="scrapeConfigSelector cannot be set when mode is DaemonSet" +// +kubebuilder:validation:XValidation:rule="!(has(self.mode) && self.mode == 'DaemonSet' && has(self.probeSelector))",message="probeSelector cannot be set when mode is DaemonSet" +// +kubebuilder:validation:XValidation:rule="!(has(self.mode) && self.mode == 'DaemonSet' && has(self.scrapeConfigNamespaceSelector))",message="scrapeConfigNamespaceSelector cannot be set when mode is DaemonSet" +// +kubebuilder:validation:XValidation:rule="!(has(self.mode) && self.mode == 'DaemonSet' && has(self.probeNamespaceSelector))",message="probeNamespaceSelector cannot be set when mode is DaemonSet" +// +kubebuilder:validation:XValidation:rule="!(has(self.mode) && self.mode == 'DaemonSet' && has(self.serviceMonitorSelector))",message="serviceMonitorSelector cannot be set when mode is DaemonSet" +// +kubebuilder:validation:XValidation:rule="!(has(self.mode) && self.mode == 'DaemonSet' && has(self.serviceMonitorNamespaceSelector))",message="serviceMonitorNamespaceSelector cannot be set when mode is DaemonSet" +// +kubebuilder:validation:XValidation:rule="!(has(self.mode) && self.mode == 'DaemonSet' && has(self.additionalScrapeConfigs))",message="additionalScrapeConfigs cannot be set when mode is DaemonSet" +type PrometheusAgentSpec struct { + // mode defines how the Prometheus operator deploys the PrometheusAgent pod(s). + // + // (Alpha) Using this field requires the `PrometheusAgentDaemonSet` feature gate to be enabled. + // + // +optional + Mode *PrometheusAgentMode `json:"mode,omitempty"` + + monitoringv1.CommonPrometheusFields `json:",inline"` +} + +// +kubebuilder:validation:Enum=StatefulSet;DaemonSet +type PrometheusAgentMode string + +const ( + // Deploys PrometheusAgent as DaemonSet. + DaemonSetPrometheusAgentMode PrometheusAgentMode = "DaemonSet" + + // Deploys PrometheusAgent as StatefulSet. 
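Not part of the vendored file, just an illustrative sketch: a PrometheusAgent running in DaemonSet mode. Per the CEL validation rules listed above, fields such as replicas, storage, or the selector fields must stay unset in this mode, and the alpha `PrometheusAgentDaemonSet` feature gate has to be enabled on the operator. The object name and namespace are placeholders.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
)

func main() {
	mode := v1alpha1.DaemonSetPrometheusAgentMode
	agent := v1alpha1.PrometheusAgent{
		ObjectMeta: metav1.ObjectMeta{Name: "node-agent", Namespace: "monitoring"},
		Spec: v1alpha1.PrometheusAgentSpec{
			// DaemonSet mode: one agent pod per node; replicas, storage and
			// shards > 1 are rejected by the CRD validation rules above.
			Mode: &mode,
		},
	}
	fmt.Printf("%s/%s mode=%s\n", agent.Namespace, agent.Name, *agent.Spec.Mode)
}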
+ StatefulSetPrometheusAgentMode PrometheusAgentMode = "StatefulSet" +) diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/register.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/register.go new file mode 100644 index 0000000000..e783809136 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/register.go @@ -0,0 +1,59 @@ +// Copyright 2020 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" +) + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: monitoring.GroupName, Version: Version} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AlertmanagerConfig{}, + &AlertmanagerConfigList{}, + &PrometheusAgent{}, + &PrometheusAgentList{}, + &ScrapeConfig{}, + &ScrapeConfigList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/scrapeconfig_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/scrapeconfig_types.go new file mode 100644 index 0000000000..972ac16ded --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/scrapeconfig_types.go @@ -0,0 +1,1528 @@ +// Copyright 2023 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + ScrapeConfigsKind = "ScrapeConfig" + ScrapeConfigName = "scrapeconfigs" + ScrapeConfigKindKey = "scrapeconfig" +) + +// Target represents a target for Prometheus to scrape +// kubebuilder:validation:MinLength:=1 +type Target string + +// SDFile represents a file used for service discovery +// +kubebuilder:validation:Pattern=`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$` +type SDFile string + +// NamespaceDiscovery is the configuration for discovering +// Kubernetes namespaces. +type NamespaceDiscovery struct { + // ownNamespace includes the namespace in which the Prometheus pod runs to the list of watched namespaces. + // +optional + IncludeOwnNamespace *bool `json:"ownNamespace,omitempty"` // nolint:kubeapilinter + // names defines a list of namespaces where to watch for resources. + // If empty and `ownNamespace` isn't true, Prometheus watches for resources in all namespaces. + // +listType=set + // +optional + Names []string `json:"names,omitempty"` +} + +type AttachMetadata struct { + // node attaches node metadata to discovered targets. + // When set to true, Prometheus must have the `get` permission on the + // `Nodes` objects. + // Only valid for Pod, Endpoint and Endpointslice roles. + // + // +optional + Node *bool `json:"node,omitempty"` // nolint:kubeapilinter +} + +// Filter name and value pairs to limit the discovery process to a subset of available resources. +type Filter struct { + // name of the Filter. + // +kubebuilder:vaidation:MinLength=1 + // +required + Name string `json:"name"` + // values defines values to filter on. + // + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:items:MinLength=1 + // +listType=set + // +required + Values []string `json:"values"` +} + +// +listType:=map +// +listMapKey:=name +type Filters []Filter + +// +kubebuilder:validation:Enum=Pod;Endpoints;Ingress;Service;Node;EndpointSlice +type KubernetesRole string + +const ( + KubernetesRolePod KubernetesRole = "Pod" + KubernetesRoleEndpoint KubernetesRole = "Endpoints" + KubernetesRoleIngress KubernetesRole = "Ingress" + KubernetesRoleService KubernetesRole = "Service" + KubernetesRoleNode KubernetesRole = "Node" + KubernetesRoleEndpointSlice KubernetesRole = "EndpointSlice" +) + +// K8SSelectorConfig is Kubernetes Selector Config +type K8SSelectorConfig struct { + // role defines the type of Kubernetes resource to limit the service discovery to. + // Accepted values are: Node, Pod, Endpoints, EndpointSlice, Service, Ingress. + // +required + Role KubernetesRole `json:"role"` + // label defines an optional label selector to limit the service discovery to resources with specific labels and label values. + // e.g: `node.kubernetes.io/instance-type=master` + // +kubebuilder:validation:MinLength=1 + // +optional + Label *string `json:"label,omitempty"` + // field defines an optional field selector to limit the service discovery to resources which have fields with specific values. 
+ // e.g: `metadata.name=foobar` + // +kubebuilder:validation:MinLength=1 + // +optional + Field *string `json:"field,omitempty"` +} + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories="prometheus-operator",shortName="scfg" +// +kubebuilder:storageversion +// +kubebuilder:subresource:status + +// ScrapeConfig defines a namespaced Prometheus scrape_config to be aggregated across +// multiple namespaces into the Prometheus configuration. +type ScrapeConfig struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ObjectMeta as the metadata that all persisted resources. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec defines the specification of ScrapeConfigSpec. + // +required + Spec ScrapeConfigSpec `json:"spec"` + // status defines the status subresource. It is under active development and is updated only when the + // "StatusForConfigurationResources" feature gate is enabled. + // + // Most recent observed status of the ScrapeConfig. Read-only. + // More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status v1.ConfigResourceStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *ScrapeConfig) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +func (l *ScrapeConfig) Bindings() []v1.WorkloadBinding { + return l.Status.Bindings +} + +// ScrapeConfigList is a list of ScrapeConfigs. +// +k8s:openapi-gen=true +type ScrapeConfigList struct { + // TypeMeta defines the versioned schema of this representation of an object. + metav1.TypeMeta `json:",inline"` + // metadata defines ListMeta as metadata for collection responses. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // List of ScrapeConfigs + // +required + Items []ScrapeConfig `json:"items"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *ScrapeConfigList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// ScrapeConfigSpec is a specification of the desired configuration for a scrape configuration. +// +k8s:openapi-gen=true +type ScrapeConfigSpec struct { + // jobName defines the value of the `job` label assigned to the scraped metrics by default. + // + // The `job_name` field in the rendered scrape configuration is always controlled by the + // operator to prevent duplicate job names, which Prometheus does not allow. Instead the + // `job` label is set by means of relabeling configs. + // + // +kubebuilder:validation:MinLength=1 + // +optional + JobName *string `json:"jobName,omitempty"` + // staticConfigs defines a list of static targets with a common label set. + // +optional + StaticConfigs []StaticConfig `json:"staticConfigs,omitempty"` + // fileSDConfigs defines a list of file service discovery configurations. + // +optional + FileSDConfigs []FileSDConfig `json:"fileSDConfigs,omitempty"` + // httpSDConfigs defines a list of HTTP service discovery configurations. + // +optional + HTTPSDConfigs []HTTPSDConfig `json:"httpSDConfigs,omitempty"` + // kubernetesSDConfigs defines a list of Kubernetes service discovery configurations. + // +optional + KubernetesSDConfigs []KubernetesSDConfig `json:"kubernetesSDConfigs,omitempty"` + // consulSDConfigs defines a list of Consul service discovery configurations. 
+ // +optional + ConsulSDConfigs []ConsulSDConfig `json:"consulSDConfigs,omitempty"` + // dnsSDConfigs defines a list of DNS service discovery configurations. + // +optional + DNSSDConfigs []DNSSDConfig `json:"dnsSDConfigs,omitempty"` + // ec2SDConfigs defines a list of EC2 service discovery configurations. + // +optional + EC2SDConfigs []EC2SDConfig `json:"ec2SDConfigs,omitempty"` + // azureSDConfigs defines a list of Azure service discovery configurations. + // +optional + AzureSDConfigs []AzureSDConfig `json:"azureSDConfigs,omitempty"` + // gceSDConfigs defines a list of GCE service discovery configurations. + // +optional + GCESDConfigs []GCESDConfig `json:"gceSDConfigs,omitempty"` + // openstackSDConfigs defines a list of OpenStack service discovery configurations. + // +optional + OpenStackSDConfigs []OpenStackSDConfig `json:"openstackSDConfigs,omitempty"` + // digitalOceanSDConfigs defines a list of DigitalOcean service discovery configurations. + // +optional + DigitalOceanSDConfigs []DigitalOceanSDConfig `json:"digitalOceanSDConfigs,omitempty"` + // kumaSDConfigs defines a list of Kuma service discovery configurations. + // +optional + KumaSDConfigs []KumaSDConfig `json:"kumaSDConfigs,omitempty"` + // eurekaSDConfigs defines a list of Eureka service discovery configurations. + // +optional + EurekaSDConfigs []EurekaSDConfig `json:"eurekaSDConfigs,omitempty"` + // dockerSDConfigs defines a list of Docker service discovery configurations. + // +optional + DockerSDConfigs []DockerSDConfig `json:"dockerSDConfigs,omitempty"` + // linodeSDConfigs defines a list of Linode service discovery configurations. + // +optional + LinodeSDConfigs []LinodeSDConfig `json:"linodeSDConfigs,omitempty"` + // hetznerSDConfigs defines a list of Hetzner service discovery configurations. + // +optional + HetznerSDConfigs []HetznerSDConfig `json:"hetznerSDConfigs,omitempty"` + // nomadSDConfigs defines a list of Nomad service discovery configurations. + // +optional + NomadSDConfigs []NomadSDConfig `json:"nomadSDConfigs,omitempty"` + // dockerSwarmSDConfigs defines a list of Dockerswarm service discovery configurations. + // +optional + DockerSwarmSDConfigs []DockerSwarmSDConfig `json:"dockerSwarmSDConfigs,omitempty"` + // puppetDBSDConfigs defines a list of PuppetDB service discovery configurations. + // +optional + PuppetDBSDConfigs []PuppetDBSDConfig `json:"puppetDBSDConfigs,omitempty"` + // lightSailSDConfigs defines a list of Lightsail service discovery configurations. + // +optional + LightSailSDConfigs []LightSailSDConfig `json:"lightSailSDConfigs,omitempty"` + // ovhcloudSDConfigs defines a list of OVHcloud service discovery configurations. + // +optional + OVHCloudSDConfigs []OVHCloudSDConfig `json:"ovhcloudSDConfigs,omitempty"` + // scalewaySDConfigs defines a list of Scaleway instances and baremetal service discovery configurations. + // +optional + ScalewaySDConfigs []ScalewaySDConfig `json:"scalewaySDConfigs,omitempty"` + // ionosSDConfigs defines a list of IONOS service discovery configurations. + // +optional + IonosSDConfigs []IonosSDConfig `json:"ionosSDConfigs,omitempty"` + // relabelings defines how to rewrite the target's labels before scraping. + // Prometheus Operator automatically adds relabelings for a few standard Kubernetes fields. + // The original scrape job's name is available via the `__tmp_prometheus_job_name` label. 
+ // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + // +kubebuilder:validation:MinItems:=1 + // +optional + RelabelConfigs []v1.RelabelConfig `json:"relabelings,omitempty"` + // metricsPath defines the HTTP path to scrape for metrics. If empty, Prometheus uses the default value (e.g. /metrics). + // +kubebuilder:validation:MinLength:=1 + // +optional + MetricsPath *string `json:"metricsPath,omitempty"` + // scrapeInterval defines the interval between consecutive scrapes. + // +optional + ScrapeInterval *v1.Duration `json:"scrapeInterval,omitempty"` + // scrapeTimeout defines the number of seconds to wait until a scrape request times out. + // The value cannot be greater than the scrape interval otherwise the operator will reject the resource. + // +optional + ScrapeTimeout *v1.Duration `json:"scrapeTimeout,omitempty"` + // scrapeProtocols defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +kubebuilder:validation:MinItems:=1 + // +optional + ScrapeProtocols []v1.ScrapeProtocol `json:"scrapeProtocols,omitempty"` + // fallbackScrapeProtocol defines the protocol to use if a scrape returns blank, unparseable, or otherwise invalid Content-Type. + // + // It requires Prometheus >= v3.0.0. + // +optional + FallbackScrapeProtocol *v1.ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + // honorTimestamps defines whether Prometheus preserves the timestamps + // when exposed by the target. + // +optional + HonorTimestamps *bool `json:"honorTimestamps,omitempty"` // nolint:kubeapilinter + // trackTimestampsStaleness defines whether Prometheus tracks staleness of + // the metrics that have an explicit timestamp present in scraped data. + // Has no effect if `honorTimestamps` is false. + // It requires Prometheus >= v2.48.0. + // + // +optional + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` // nolint:kubeapilinter + // honorLabels defines when true the metric's labels when they collide + // with the target's labels. + // +optional + HonorLabels *bool `json:"honorLabels,omitempty"` // nolint:kubeapilinter + // params defines optional HTTP URL parameters + // +mapType:=atomic + // +optional + //nolint:kubeapilinter + Params map[string][]string `json:"params,omitempty"` + // scheme defines the protocol scheme used for requests. + // +optional + Scheme *v1.Scheme `json:"scheme,omitempty"` + // enableCompression when false, Prometheus will request uncompressed response from the scraped target. + // + // It requires Prometheus >= v2.49.0. + // + // If unset, Prometheus uses true by default. + // +optional + EnableCompression *bool `json:"enableCompression,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter + // basicAuth defines information to use on every scrape request. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // authorization defines the header to use on every scrape request. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the configuration to use on every scrape request. 
+ // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // tlsConfig defines the TLS configuration to use on every scrape request + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // sampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + // +optional + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + // targetLimit defines a limit on the number of scraped targets that will be accepted. + // +optional + TargetLimit *uint64 `json:"targetLimit,omitempty"` + // labelLimit defines the per-scrape limit on number of labels that will be accepted for a sample. + // Only valid in Prometheus versions 2.27.0 and newer. + // +optional + LabelLimit *uint64 `json:"labelLimit,omitempty"` + // labelNameLengthLimit defines the per-scrape limit on length of labels name that will be accepted for a sample. + // Only valid in Prometheus versions 2.27.0 and newer. + // +optional + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + // labelValueLengthLimit defines the per-scrape limit on length of labels value that will be accepted for a sample. + // Only valid in Prometheus versions 2.27.0 and newer. + // +optional + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + + v1.NativeHistogramConfig `json:",inline"` + // keepDroppedTargets defines the per-scrape limit on the number of targets dropped by relabeling + // that will be kept in memory. 0 means no limit. + // + // It requires Prometheus >= v2.47.0. + // + // +optional + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + // metricRelabelings defines the metricRelabelings to apply to samples before ingestion. + // +kubebuilder:validation:MinItems:=1 + // +optional + MetricRelabelConfigs []v1.RelabelConfig `json:"metricRelabelings,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // nameValidationScheme defines the validation scheme for metric and label names. + // + // It requires Prometheus >= v3.0.0. + // + // +optional + NameValidationScheme *v1.NameValidationSchemeOptions `json:"nameValidationScheme,omitempty"` + // nameEscapingScheme defines the metric name escaping mode to request through content negotiation. + // + // It requires Prometheus >= v3.4.0. + // + // +optional + NameEscapingScheme *v1.NameEscapingSchemeOptions `json:"nameEscapingScheme,omitempty"` + // scrapeClass defines the scrape class to apply. + // +kubebuilder:validation:MinLength=1 + // +optional + ScrapeClassName *string `json:"scrapeClass,omitempty"` +} + +// StaticConfig defines a Prometheus static configuration. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config +// +k8s:openapi-gen=true +type StaticConfig struct { + // targets defines the list of targets for this static configuration. + // +kubebuilder:validation:MinItems:=1 + // +listType=set + // +required + Targets []Target `json:"targets"` + // labels defines labels assigned to all metrics scraped from the targets. + // +mapType:=atomic + // +optional + //nolint:kubeapilinter + Labels map[string]string `json:"labels,omitempty"` +} + +// FileSDConfig defines a Prometheus file service discovery configuration +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config +// +k8s:openapi-gen=true +type FileSDConfig struct { + // files defines the list of files to be used for file discovery. Recommendation: use absolute paths. 
While relative paths work, the + // prometheus-operator project makes no guarantees about the working directory where the configuration file is + // stored. + // Files must be mounted using Prometheus.ConfigMaps or Prometheus.Secrets. + // +kubebuilder:validation:MinItems:=1 + // +listType=set + // +required + Files []SDFile `json:"files"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` +} + +// HTTPSDConfig defines a prometheus HTTP service discovery configuration +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#http_sd_config +// +k8s:openapi-gen=true +type HTTPSDConfig struct { + // url defines the URL from which the targets are fetched. + // +kubebuilder:validation:MinLength:=1 + // +kubebuilder:validation:Pattern:="^http(s)?://.+$" + // +required + URL string `json:"url"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // basicAuth defines information to use on every scrape request. + // More info: https://prometheus.io/docs/operating/configuration/#endpoints + // Cannot be set at the same time as `authorization`, or `oAuth2`. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // authorization defines the authorization header configuration to authenticate against the target HTTP endpoint. + // Cannot be set at the same time as `oAuth2`, or `basicAuth`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the optional OAuth 2.0 configuration to authenticate against the target HTTP endpoint. + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + v1.ProxyConfig `json:",inline"` + // tlsConfig defines the TLS configuration applying to the target HTTP endpoint. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter +} + +// KubernetesSDConfig allows retrieving scrape targets from Kubernetes' REST API. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config +// +k8s:openapi-gen=true +type KubernetesSDConfig struct { + // apiServer defines the API server address consisting of a hostname or IP address followed + // by an optional port number. + // If left empty, Prometheus is assumed to run inside + // of the cluster. It will discover API servers automatically and use the pod's + // CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. + // +kubebuilder:validation:MinLength=1 + // +optional + APIServer *string `json:"apiServer,omitempty"` + // role defines the Kubernetes role of the entities that should be discovered. + // Role `Endpointslice` requires Prometheus >= v2.21.0 + // +required + Role KubernetesRole `json:"role"` + // namespaces defines the namespace discovery. If omitted, Prometheus discovers targets across all namespaces. 
+ // +optional + Namespaces *NamespaceDiscovery `json:"namespaces,omitempty"` + // attachMetadata defines the metadata to attach to discovered targets. + // It requires Prometheus >= v2.35.0 when using the `Pod` role and + // Prometheus >= v2.37.0 for `Endpoints` and `Endpointslice` roles. + // +optional + AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` + // selectors defines the selector to select objects. + // It requires Prometheus >= v2.17.0 + // +optional + // +listType=map + // +listMapKey=role + Selectors []K8SSelectorConfig `json:"selectors,omitempty"` + // basicAuth defines information to use on every scrape request. + // Cannot be set at the same time as `authorization`, or `oauth2`. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // authorization defines the authorization header to use on every scrape request. + // Cannot be set at the same time as `basicAuth`, or `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the optional OAuth 2.0 configuration to authenticate against the target HTTP endpoint. + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + v1.ProxyConfig `json:",inline"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter + // tlsConfig defines the TLS configuration to connect to the Kubernetes API. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` +} + +// ConsulSDConfig defines a Consul service discovery configuration +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config +// +k8s:openapi-gen=true +type ConsulSDConfig struct { + // server defines the consul server address. A valid string consisting of a hostname or IP followed by an optional port number. + // +kubebuilder:validation:MinLength=1 + // +required + Server string `json:"server"` + // pathPrefix defines the prefix for URIs for when consul is behind an API gateway (reverse proxy). + // + // It requires Prometheus >= 2.45.0. + // +kubebuilder:validation:MinLength=1 + // +optional + PathPrefix *string `json:"pathPrefix,omitempty"` + // tokenRef defines the consul ACL TokenRef, if not provided it will use the ACL from the local Consul Agent. + // +optional + TokenRef *corev1.SecretKeySelector `json:"tokenRef,omitempty"` + // datacenter defines the consul Datacenter name, if not provided it will use the local Consul Agent Datacenter. + // +kubebuilder:validation:MinLength=1 + // +optional + Datacenter *string `json:"datacenter,omitempty"` + // namespace are only supported in Consul Enterprise. + // + // It requires Prometheus >= 2.28.0. + // +kubebuilder:validation:MinLength=1 + // +optional + Namespace *string `json:"namespace,omitempty"` + // partition defines the admin Partitions are only supported in Consul Enterprise. + // +kubebuilder:validation:MinLength=1 + // +optional + Partition *string `json:"partition,omitempty"` + // scheme defines the HTTP Scheme. + // +optional + Scheme *v1.Scheme `json:"scheme,omitempty"` + // services defines a list of services for which targets are retrieved. If omitted, all services are scraped. 
+ // +listType:=set + // +optional + Services []string `json:"services,omitempty"` + // tags defines an optional list of tags used to filter nodes for a given service. Services must contain all tags in the list. + // Starting with Consul 1.14, it is recommended to use `filter` with the `ServiceTags` selector instead. + // +listType:=set + // +optional + Tags []string `json:"tags,omitempty"` + // tagSeparator defines the string by which Consul tags are joined into the tag label. + // If unset, Prometheus uses its default value. + // +kubebuilder:validation:MinLength=1 + // +optional + TagSeparator *string `json:"tagSeparator,omitempty"` + // nodeMeta defines the node metadata key/value pairs to filter nodes for a given service. + // Starting with Consul 1.14, it is recommended to use `filter` with the `NodeMeta` selector instead. + // +mapType:=atomic + // +optional + //nolint:kubeapilinter + NodeMeta map[string]string `json:"nodeMeta,omitempty"` + // filter defines the filter expression used to filter the catalog results. + // See https://www.consul.io/api-docs/catalog#list-services + // It requires Prometheus >= 3.0.0. + // +kubebuilder:validation:MinLength=1 + // +optional + Filter *string `json:"filter,omitempty"` + // allowStale Consul results (see https://www.consul.io/api/features/consistency.html). Will reduce load on Consul. + // If unset, Prometheus uses its default value. + // +optional + AllowStale *bool `json:"allowStale,omitempty"` // nolint:kubeapilinter + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // basicAuth defines the information to authenticate against the Consul Server. + // More info: https://prometheus.io/docs/operating/configuration/#endpoints + // Cannot be set at the same time as `authorization`, or `oauth2`. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // authorization defines the header configuration to authenticate against the Consul Server. + // Cannot be set at the same time as `basicAuth`, or `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the optional OAuth 2.0 configuration to authenticate against the target HTTP endpoint. + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + v1.ProxyConfig `json:",inline"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHttp2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter + // tlsConfig defines the TLS configuration to connect to the Consul API. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` +} + +// +kubebuilder:validation:Enum=A;AAAA;MX;NS;SRV +type DNSRecordType string + +const ( + DNSRecordTypeA DNSRecordType = "A" + DNSRecordTypeSRV DNSRecordType = "SRV" + DNSRecordTypeAAAA DNSRecordType = "AAAA" + DNSRecordTypeMX DNSRecordType = "MX" + DNSRecordTypeNS DNSRecordType = "NS" +) + +// DNSSDConfig allows specifying a set of DNS domain names which are periodically queried to discover a list of targets. +// The DNS servers to be contacted are read from /etc/resolv.conf. 
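
As a quick illustration of the two discovery mechanisms defined above, the sketch below fills in a `ConsulSDConfig` and a `KubernetesSDConfig`. The server address, datacenter and service names are placeholders, and the `"Pod"` role is cast through the `KubernetesRole` type declared earlier in this file rather than relying on a particular constant name.

```go
package main

import (
	"fmt"

	"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
	"k8s.io/utils/ptr"
)

func main() {
	// Consul-based discovery: only Server is required, everything else is optional.
	consul := v1alpha1.ConsulSDConfig{
		Server:     "consul.example.com:8500", // placeholder address
		Datacenter: ptr.To("dc1"),             // placeholder datacenter
		Services:   []string{"web", "api"},    // placeholder service names
	}

	// Kubernetes-based discovery: Role is the only required field; valid values
	// come from the KubernetesRole enum declared earlier in this file.
	k8s := v1alpha1.KubernetesSDConfig{
		Role: v1alpha1.KubernetesRole("Pod"),
	}

	fmt.Println(consul.Server, k8s.Role)
}
```
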
+// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config +// +k8s:openapi-gen=true +type DNSSDConfig struct { + // names defines a list of DNS domain names to be queried. + // +kubebuilder:validation:MinItems:=1 + // +kubebuilder:validation:items:MinLength=1 + // +required + Names []string `json:"names"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // type defines the type of DNS query to perform. One of SRV, A, AAAA, MX or NS. + // If not set, Prometheus uses its default value. + // + // When set to NS, it requires Prometheus >= v2.49.0. + // When set to MX, it requires Prometheus >= v2.38.0 + // + // +optional + Type *DNSRecordType `json:"type,omitempty"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // Ignored for SRV records + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` +} + +// EC2SDConfig allow retrieving scrape targets from AWS EC2 instances. +// The private IP address is used by default, but may be changed to the public IP address with relabeling. +// The IAM credentials used must have the ec2:DescribeInstances permission to discover scrape targets +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config +// +// The EC2 service discovery requires AWS API keys or role ARN for authentication. +// BasicAuth, Authorization and OAuth2 fields are not present on purpose. +// +// +k8s:openapi-gen=true +type EC2SDConfig struct { + // region defines the AWS region. + // +kubebuilder:validation:MinLength=1 + // +optional + Region *string `json:"region,omitempty"` + // accessKey defines the AWS API key. + // +optional + AccessKey *corev1.SecretKeySelector `json:"accessKey,omitempty"` + // secretKey defines the AWS API secret. + // +optional + SecretKey *corev1.SecretKeySelector `json:"secretKey,omitempty"` + // roleARN defines an alternative to using AWS API keys. + // +kubebuilder:validation:MinLength=1 + // +optional + RoleARN *string `json:"roleARN,omitempty"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // instead be specified in the relabeling rule. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // filters can be used optionally to filter the instance list by other criteria. + // Available filter criteria can be found here: + // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html + // Filter API documentation: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Filter.html + // It requires Prometheus >= v2.3.0 + // +optional + Filters Filters `json:"filters,omitempty"` + v1.ProxyConfig `json:",inline"` + // tlsConfig defines the TLS configuration to connect to the Consul API. + // It requires Prometheus >= v2.41.0 + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. 
+ // It requires Prometheus >= v2.41.0 + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // It requires Prometheus >= v2.41.0 + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter +} + +// +kubebuilder:validation:Enum=OAuth;ManagedIdentity;SDK +type AuthenticationMethodType string + +const ( + AuthMethodTypeOAuth AuthenticationMethodType = "OAuth" + AuthMethodTypeManagedIdentity AuthenticationMethodType = "ManagedIdentity" + AuthMethodTypeSDK AuthenticationMethodType = "SDK" +) + +// AzureSDConfig allow retrieving scrape targets from Azure VMs. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#azure_sd_config +// +k8s:openapi-gen=true +type AzureSDConfig struct { + // environment defines the Azure environment. + // +kubebuilder:validation:MinLength=1 + // +optional + Environment *string `json:"environment,omitempty"` + // authenticationMethod defines the authentication method, either `OAuth` or `ManagedIdentity` or `SDK`. + // See https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview + // SDK authentication method uses environment variables by default. + // See https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication + // +optional + AuthenticationMethod *AuthenticationMethodType `json:"authenticationMethod,omitempty"` + // subscriptionID defines subscription ID. Always required. + // +kubebuilder:validation:MinLength=1 + // +required + SubscriptionID string `json:"subscriptionID"` + // tenantID defines tenant ID. Only required with the OAuth authentication method. + // +kubebuilder:validation:MinLength=1 + // +optional + TenantID *string `json:"tenantID,omitempty"` + // clientID defines client ID. Only required with the OAuth authentication method. + // +kubebuilder:validation:MinLength=1 + // +optional + ClientID *string `json:"clientID,omitempty"` + // clientSecret defines client secret. Only required with the OAuth authentication method. + // +optional + ClientSecret *corev1.SecretKeySelector `json:"clientSecret,omitempty"` + // resourceGroup defines resource group name. Limits discovery to this resource group. + // Requires Prometheus v2.35.0 and above + // +kubebuilder:validation:MinLength=1 + // +optional + ResourceGroup *string `json:"resourceGroup,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // instead be specified in the relabeling rule. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` + // basicAuth defines the information to authenticate against the target HTTP endpoint. + // More info: https://prometheus.io/docs/operating/configuration/#endpoints + // Cannot be set at the same time as `authorization`, or `oAuth2`. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // authorization defines the authorization header configuration to authenticate against the target HTTP endpoint. + // Cannot be set at the same time as `oAuth2`, or `basicAuth`. 
+ // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the configuration to use on every scrape request. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter + // tlsConfig defies the TLS configuration applying to the target HTTP endpoint. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` +} + +// GCESDConfig configures scrape targets from GCP GCE instances. +// The private IP address is used by default, but may be changed to +// the public IP address with relabeling. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#gce_sd_config +// +// The GCE service discovery will load the Google Cloud credentials +// from the file specified by the GOOGLE_APPLICATION_CREDENTIALS environment variable. +// See https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform +// +// A pre-requisite for using GCESDConfig is that a Secret containing valid +// Google Cloud credentials is mounted into the Prometheus or PrometheusAgent +// pod via the `.spec.secrets` field and that the GOOGLE_APPLICATION_CREDENTIALS +// environment variable is set to /etc/prometheus/secrets//. +// +k8s:openapi-gen=true +type GCESDConfig struct { + // project defines the Google Cloud Project ID + // +kubebuilder:validation:MinLength:=1 + // +required + Project string `json:"project"` + // zone defines the zone of the scrape targets. If you need multiple zones use multiple GCESDConfigs. + // +kubebuilder:validation:MinLength:=1 + // +required + Zone string `json:"zone"` + // filter defines the filter that can be used optionally to filter the instance list by other criteria + // Syntax of this filter is described in the filter query parameter section: + // https://cloud.google.com/compute/docs/reference/latest/instances/list + // +kubebuilder:validation:MinLength:=1 + // +optional + Filter *string `json:"filter,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // instead be specified in the relabeling rule. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` + // tagSeparator defines the tag separator is used to separate the tags on concatenation + // +kubebuilder:validation:MinLength:=1 + // +optional + TagSeparator *string `json:"tagSeparator,omitempty"` +} + +// +kubebuilder:validation:Enum=Instance;Hypervisor;LoadBalancer +type OpenStackRole string + +const ( + OpenStackRoleInstance OpenStackRole = "Instance" + OpenStackRoleHypervisor OpenStackRole = "Hypervisor" + OpenStackRoleLoadBalancer OpenStackRole = "LoadBalancer" +) + +// OpenStackSDConfig allow retrieving scrape targets from OpenStack Nova instances. 
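
To make the cloud-provider configurations above easier to compare, here is a hedged sketch of an `EC2SDConfig` that reads AWS keys from a Secret and a `GCESDConfig` keyed by project and zone. The Secret name, key names, region, project and zone are all placeholders.

```go
package main

import (
	"fmt"

	"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/ptr"
)

func main() {
	// EC2 discovery: credentials are referenced from a Secret rather than inlined.
	ec2 := v1alpha1.EC2SDConfig{
		Region: ptr.To("us-east-1"), // placeholder region
		AccessKey: &corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: "aws-credentials"}, // placeholder Secret
			Key:                  "access-key",                                         // placeholder key
		},
		SecretKey: &corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: "aws-credentials"},
			Key:                  "secret-key",
		},
		Port: ptr.To(int32(9100)),
	}

	// GCE discovery: Project and Zone are the only required fields.
	gce := v1alpha1.GCESDConfig{
		Project: "example-project", // placeholder project ID
		Zone:    "us-central1-a",   // placeholder zone
	}

	fmt.Println(*ec2.Region, gce.Project)
}
```
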
+// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config +// +k8s:openapi-gen=true +type OpenStackSDConfig struct { + // role defines the OpenStack role of entities that should be discovered. + // + // Note: The `LoadBalancer` role requires Prometheus >= v3.2.0. + // + // +required + Role OpenStackRole `json:"role"` + // region defines the OpenStack Region. + // +kubebuilder:validation:MinLength:=1 + // +required + Region string `json:"region"` + // identityEndpoint defines the HTTP endpoint that is required to work with + // the Identity API of the appropriate version. + // +kubebuilder:validation:Pattern:=`^http(s)?:\/\/.+$` + // +optional + IdentityEndpoint *string `json:"identityEndpoint,omitempty"` + // username defines the username required if using Identity V2 API. Consult with your provider's + // control panel to discover your account's username. + // In Identity V3, either userid or a combination of username + // and domainId or domainName are needed + // +kubebuilder:validation:MinLength:=1 + // +optional + Username *string `json:"username,omitempty"` + // userid defines the OpenStack userid. + // +kubebuilder:validation:MinLength:=1 + // +optional + UserID *string `json:"userid,omitempty"` + // password defines the password for the Identity V2 and V3 APIs. Consult with your provider's + // control panel to discover your account's preferred method of authentication. + // +optional + Password *corev1.SecretKeySelector `json:"password,omitempty"` + // domainName defines at most one of domainId and domainName that must be provided if using username + // with Identity V3. Otherwise, either are optional. + // +kubebuilder:validation:MinLength:=1 + // +optional + DomainName *string `json:"domainName,omitempty"` + // domainID defines The OpenStack domainID. + // +kubebuilder:validation:MinLength:=1 + // +optional + DomainID *string `json:"domainID,omitempty"` + // projectName defines an optional field for the Identity V2 API. + // Some providers allow you to specify a ProjectName instead of the ProjectId. + // Some require both. Your provider's authentication policies will determine + // how these fields influence authentication. + // +kubebuilder:validation:MinLength:=1 + // +optional + ProjectName *string `json:"projectName,omitempty"` + // projectID defines the OpenStack projectID. + // +kubebuilder:validation:MinLength:=1 + // +optional + ProjectID *string `json:"projectID,omitempty"` + // applicationCredentialName defines the ApplicationCredentialID or ApplicationCredentialName fields are + // required if using an application credential to authenticate. Some providers + // allow you to create an application credential to authenticate rather than a + // password. + // +kubebuilder:validation:MinLength:=1 + // +optional + ApplicationCredentialName *string `json:"applicationCredentialName,omitempty"` + // applicationCredentialId defines the OpenStack applicationCredentialId. + // +optional + ApplicationCredentialID *string `json:"applicationCredentialId,omitempty"` + // applicationCredentialSecret defines the required field if using an application + // credential to authenticate. + // +optional + ApplicationCredentialSecret *corev1.SecretKeySelector `json:"applicationCredentialSecret,omitempty"` + // allTenants defines whether the service discovery should list all instances for all projects. + // It is only relevant for the 'instance' role and usually requires admin permissions. 
+ // +optional + AllTenants *bool `json:"allTenants,omitempty"` // nolint:kubeapilinter + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // instead be specified in the relabeling rule. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` + // availability defines the availability of the endpoint to connect to. + // +kubebuilder:validation:Enum=Public;public;Admin;admin;Internal;internal + // +optional + Availability *string `json:"availability,omitempty"` + // tlsConfig defines the TLS configuration applying to the target HTTP endpoint. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` +} + +// DigitalOceanSDConfig allow retrieving scrape targets from DigitalOcean's Droplets API. +// This service discovery uses the public IPv4 address by default, by that can be changed with relabeling +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#digitalocean_sd_config +// +k8s:openapi-gen=true +type DigitalOceanSDConfig struct { + // authorization defines the header configuration to authenticate against the DigitalOcean API. + // Cannot be set at the same time as `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the configuration to use on every scrape request. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter + // tlsConfig defines the TLS configuration to connect to the Consul API. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` +} + +// KumaSDConfig allow retrieving scrape targets from Kuma's control plane. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kuma_sd_config +// +k8s:openapi-gen=true +type KumaSDConfig struct { + // server defines the address of the Kuma Control Plane's MADS xDS server. + // +required + Server URL `json:"server"` + // clientID is used by Kuma Control Plane to compute Monitoring Assignment for specific Prometheus backend. + // It requires Prometheus >= v2.50.0. + // +kubebuilder:validation:MinLength=1 + // +optional + ClientID *string `json:"clientID,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. 
+ // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // fetchTimeout defines the time after which the monitoring assignments are refreshed. + // +optional + FetchTimeout *v1.Duration `json:"fetchTimeout,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // tlsConfig defines the TLS configuration to connect to the Consul API. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // basicAuth defines information to use on every scrape request. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // authorization defines the header configuration to authenticate against the DigitalOcean API. + // Cannot be set at the same time as `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the configuration to use on every scrape request. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter +} + +// Eureka SD configurations allow retrieving scrape targets using the Eureka REST API. +// Prometheus will periodically check the REST endpoint and create a target for every app instance. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#eureka_sd_config +// +k8s:openapi-gen=true +type EurekaSDConfig struct { + // server defines the URL to connect to the Eureka server. + // +kubebuilder:validation:Pattern:="^http(s)?://.+$" + // +kubebuilder:validation:MinLength=1 + // +required + Server string `json:"server"` + // basicAuth defines the BasicAuth information to use on every scrape request. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // authorization defines the header configuration to authenticate against the DigitalOcean API. + // Cannot be set at the same time as `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the configuration to use on every scrape request. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // tlsConfig defines the TLS configuration to connect to the Consul API. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` +} + +// Docker SD configurations allow retrieving scrape targets from Docker Engine hosts. +// This SD discovers "containers" and will create a target for each network IP and +// port the container is configured to expose. 
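
For the HTTP-based discoveries in this stretch of the file, the sketch below pairs a `DNSSDConfig` (defined further up) with the `EurekaSDConfig` above. The domain name, server URL and 30s refresh interval are placeholders, and `monitoringv1` refers to the sibling monitoring v1 package that this file imports as `v1`.

```go
package main

import (
	"fmt"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
	"k8s.io/utils/ptr"
)

func main() {
	// DNS discovery: SRV lookups against a placeholder domain.
	dns := v1alpha1.DNSSDConfig{
		Names: []string{"_prometheus._tcp.example.com"},
		Type:  ptr.To(v1alpha1.DNSRecordTypeSRV),
	}

	// Eureka discovery: Server must match the ^http(s)?://.+$ pattern above.
	eureka := v1alpha1.EurekaSDConfig{
		Server:          "http://eureka.example.com:8761/eureka/v2",
		RefreshInterval: ptr.To(monitoringv1.Duration("30s")),
	}

	fmt.Println(dns.Names[0], eureka.Server)
}
```
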
+// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#docker_sd_config +// +k8s:openapi-gen=true +type DockerSDConfig struct { + // host defines the address of the docker daemon + // +kubebuilder:validation:MinLength=1 + // +required + Host string `json:"host"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // tlsConfig defines the TLS configuration to connect to the Consul API. + // +optionals + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` + // hostNetworkingHost defines the host to use if the container is in host networking mode. + // +kubebuilder:validation:MinLength=1 + // +optional + HostNetworkingHost *string `json:"hostNetworkingHost,omitempty"` + // matchFirstNetwork defines whether to match the first network if the container has multiple networks defined. + // If unset, Prometheus uses true by default. + // It requires Prometheus >= v2.54.1. + // + // +optional + MatchFirstNetwork *bool `json:"matchFirstNetwork,omitempty"` // nolint:kubeapilinter + // filters defines filters to limit the discovery process to a subset of the available resources. + // +optional + Filters Filters `json:"filters,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // basicAuth defines information to use on every scrape request. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // authorization defines the header configuration to authenticate against the DigitalOcean API. + // Cannot be set at the same time as `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the configuration to use on every scrape request. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter +} + +// HetznerSDConfig allow retrieving scrape targets from Hetzner Cloud API and Robot API. +// This service discovery uses the public IPv4 address by default, but that can be changed with relabeling +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#hetzner_sd_config +// +k8s:openapi-gen=true +type HetznerSDConfig struct { + // role defines the Hetzner role of entities that should be discovered. + // +kubebuilder:validation:Enum=hcloud;Hcloud;robot;Robot + // +required + Role string `json:"role"` + // basicAuth defines information to use on every scrape request. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // authorization defines the header configuration to authenticate against the DigitalOcean API. + // Cannot be set at the same time as `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the configuration to use on every scrape request. 
+ // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter + // tlsConfig defines the TLS configuration to connect to the Consul API. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // labelSelector defines the label selector used to filter the servers when fetching them from the API. + // It requires Prometheus >= v3.5.0. + // +kubebuilder:validation:MinLength=1 + // +optional + LabelSelector *string `json:"labelSelector,omitempty"` +} + +// NomadSDConfig configurations allow retrieving scrape targets from Nomad's Service API. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#nomad_sd_config +// +k8s:openapi-gen=true +type NomadSDConfig struct { + // allowStale defines the information to access the Nomad API. It is to be defined + // as the Nomad documentation requires. + // +optional + AllowStale *bool `json:"allowStale,omitempty"` // nolint:kubeapilinter + // namespace defines the Nomad namespace to query for service discovery. + // When specified, only resources within this namespace will be discovered. + // +optional + Namespace *string `json:"namespace,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // region defines the Nomad region to query for service discovery. + // When specified, only resources within this region will be discovered. + // +optional + Region *string `json:"region,omitempty"` + // server defines the Nomad server address to connect to for service discovery. + // This should be the full URL including protocol (e.g., "https://nomad.example.com:4646"). + // +kubebuilder:validation:MinLength=1 + // +required + Server string `json:"server"` + // tagSeparator defines the separator used to join multiple tags. + // This determines how Nomad service tags are concatenated into Prometheus labels. + // +optional + TagSeparator *string `json:"tagSeparator,omitempty"` + // basicAuth defines information to use on every scrape request. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // authorization defines the header configuration to authenticate against the DigitalOcean API. + // Cannot be set at the same time as `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the configuration to use on every scrape request. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // tlsConfig defines the TLS configuration to connect to the Consul API. 
+ // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter +} + +// Service of the targets to retrieve. Must be `VPS` or `DedicatedServer`. +// +kubebuilder:validation:Enum=VPS;DedicatedServer +type OVHService string + +const ( + OVHServiceVPS OVHService = "VPS" + OVHServiceDedicatedServer OVHService = "DedicatedServer" +) + +// OVHCloudSDConfig configurations allow retrieving scrape targets from OVHcloud's dedicated servers and VPS using their API. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ovhcloud_sd_config +// +k8s:openapi-gen=true +type OVHCloudSDConfig struct { + // applicationKey defines the access key to use for OVHCloud API authentication. + // This is obtained from the OVHCloud API credentials at https://api.ovh.com. + // +kubebuilder:validation:MinLength=1 + // +required + ApplicationKey string `json:"applicationKey"` + // applicationSecret defines the secret key for OVHCloud API authentication. + // This contains the application secret obtained during OVHCloud API credential creation. + // +required + ApplicationSecret corev1.SecretKeySelector `json:"applicationSecret"` + // consumerKey defines the consumer key for OVHCloud API authentication. + // This is the third component of OVHCloud's three-key authentication system. + // +required + ConsumerKey corev1.SecretKeySelector `json:"consumerKey"` + // service defines the service type of the targets to retrieve. + // Must be either `VPS` or `DedicatedServer` to specify which OVHCloud resources to discover. + // +required + Service OVHService `json:"service"` + // endpoint defines a custom API endpoint to be used. + // When not specified, defaults to the standard OVHCloud API endpoint for the region. + // +kubebuilder:validation:MinLength=1 + // +optional + Endpoint *string `json:"endpoint,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` +} + +// DockerSwarmSDConfig configurations allow retrieving scrape targets from Docker Swarm engine. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config +// +k8s:openapi-gen=true +type DockerSwarmSDConfig struct { + // host defines the address of the Docker daemon + // +kubebuilder:validation:Pattern="^[a-zA-Z][a-zA-Z0-9+.-]*://.+$" + // +required + Host string `json:"host"` + // role of the targets to retrieve. Must be `Services`, `Tasks`, or `Nodes`. + // +kubebuilder:validation:Enum=Services;Tasks;Nodes + // +required + Role string `json:"role"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // tasks and services that don't have published ports. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` + // filters defines the filters to limit the discovery process to a subset of available + // resources. 
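
A brief sketch of the Nomad and Hetzner variants defined above; the server URL, region and role value are placeholders chosen to satisfy the validation markers shown in the field comments.

```go
package main

import (
	"fmt"

	"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
	"k8s.io/utils/ptr"
)

func main() {
	// Nomad discovery: Server is the only required field and should be a full URL.
	nomad := v1alpha1.NomadSDConfig{
		Server: "https://nomad.example.com:4646", // placeholder address
		Region: ptr.To("global"),
	}

	// Hetzner discovery: Role must be one of hcloud/Hcloud/robot/Robot per the enum above.
	hetzner := v1alpha1.HetznerSDConfig{
		Role: "hcloud",
		Port: ptr.To(int32(9100)),
	}

	fmt.Println(nomad.Server, hetzner.Role)
}
```
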
+ // The available filters are listed in the upstream documentation: + // Services: https://docs.docker.com/engine/api/v1.40/#operation/ServiceList + // Tasks: https://docs.docker.com/engine/api/v1.40/#operation/TaskList + // Nodes: https://docs.docker.com/engine/api/v1.40/#operation/NodeList + // +optional + Filters Filters `json:"filters,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // basicAuth defines information to use on every scrape request. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // authorization defines the header configuration to authenticate against the DigitalOcean API. + // Cannot be set at the same time as `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the optional OAuth 2.0 configuration to authenticate against the target HTTP endpoint. + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + v1.ProxyConfig `json:",inline"` + // tlsConfig defines the TLS configuration to connect to the Consul API. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter +} + +// LinodeSDConfig configurations allow retrieving scrape targets from Linode's Linode APIv4. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#linode_sd_config +// +k8s:openapi-gen=true +type LinodeSDConfig struct { + // region defines the region to filter on. + // +kubebuilder:validation:MinLength=1 + // +optional + Region *string `json:"region,omitempty"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` + // tagSeparator defines the string by which Linode Instance tags are joined into the tag label.el. + // +kubebuilder:validation:MinLength=1 + // +optional + TagSeparator *string `json:"tagSeparator,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // authorization defines the header configuration to authenticate against the DigitalOcean API. + // Cannot be set at the same time as `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the optional OAuth 2.0 configuration to authenticate against the target HTTP endpoint. + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + v1.ProxyConfig `json:",inline"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // tlsConfig defines the TLS configuration to connect to the Consul API. 
+ // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter +} + +// PuppetDBSDConfig configurations allow retrieving scrape targets from PuppetDB resources. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#puppetdb_sd_config +type PuppetDBSDConfig struct { + // url defines the URL of the PuppetDB root query endpoint. + // +kubebuilder:validation:MinLength:=1 + // +kubebuilder:validation:Pattern:="^http(s)?://.+$" + // +required + URL string `json:"url"` + // query defines the Puppet Query Language (PQL) query. Only resources are supported. + // https://puppet.com/docs/puppetdb/latest/api/query/v4/pql.html + // +kubebuilder:validation:MinLength=1 + // +required + Query string `json:"query"` + // includeParameters defines whether to include the parameters as meta labels. + // Note: Enabling this exposes parameters in the Prometheus UI and API. Make sure + // that you don't have secrets exposed as parameters if you enable this. + // +optional + IncludeParameters *bool `json:"includeParameters,omitempty"` // nolint:kubeapilinter + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` + // basicAuth defines information to use on every scrape request. + // Cannot be set at the same time as `authorization`, or `oauth2`. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // authorization defines the header configuration to authenticate against the DigitalOcean API. + // Cannot be set at the same time as `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the optional OAuth 2.0 configuration to authenticate against the target HTTP endpoint. + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + v1.ProxyConfig `json:",inline"` + // tlsConfig defines the TLS configuration to connect to the Consul API. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter +} + +// LightSailSDConfig configurations allow retrieving scrape targets from AWS Lightsail instances. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#lightsail_sd_config +// TODO: Need to document that we will not be supporting the `_file` fields. +type LightSailSDConfig struct { + // region defines the AWS region. + // +kubebuilder:validation:MinLength=1 + // +optional + Region *string `json:"region,omitempty"` + // accessKey defines the AWS API key. + // +optional + AccessKey *corev1.SecretKeySelector `json:"accessKey,omitempty"` + // secretKey defines the AWS API secret. 
+ // +optional + SecretKey *corev1.SecretKeySelector `json:"secretKey,omitempty"` + // roleARN defines the AWS Role ARN, an alternative to using AWS API keys. + // +optional + RoleARN *string `json:"roleARN,omitempty"` + // endpoint defines the custom endpoint to be used. + // +kubebuilder:validation:MinLength=1 + // +optional + Endpoint *string `json:"endpoint,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` + // basicAuth defines information to use on every scrape request. + // Cannot be set at the same time as `authorization`, or `oauth2`. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // authorization defines the header configuration to authenticate against the DigitalOcean API. + // Cannot be set at the same time as `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // oauth2 defines the optional OAuth 2.0 configuration to authenticate against the target HTTP endpoint. + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + v1.ProxyConfig `json:",inline"` + // tlsConfig defines the TLS configuration to connect to the Consul API. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter +} + +// Role of the targets to retrieve. Must be `Instance` or `Baremetal`. +// +kubebuilder:validation:Enum=Instance;Baremetal +type ScalewayRole string + +const ( + ScalewayRoleInstance ScalewayRole = "Instance" + ScalewayRoleBaremetal ScalewayRole = "Baremetal" +) + +// ScalewaySDConfig configurations allow retrieving scrape targets from Scaleway instances and baremetal services. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scaleway_sd_config +// TODO: Need to document that we will not be supporting the `_file` fields. +type ScalewaySDConfig struct { + // accessKey defines the access key to use. https://console.scaleway.com/project/credentials + // +kubebuilder:validation:MinLength=1 + // +required + AccessKey string `json:"accessKey"` + // secretKey defines the secret key to use when listing targets. + // +required + SecretKey corev1.SecretKeySelector `json:"secretKey"` + // projectID defines the Project ID of the targets. + // +kubebuilder:validation:MinLength=1 + // +required + ProjectID string `json:"projectID"` + // role defines the service of the targets to retrieve. Must be `Instance` or `Baremetal`. + // +required + Role ScalewayRole `json:"role"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` + // apiURL defines the API URL to use when doing the server listing requests. 
+ // +kubebuilder:validation:Pattern:="^http(s)?://.+$" + // +optional + ApiURL *string `json:"apiURL,omitempty"` + // zone defines the availability zone of your targets (e.g. fr-par-1). + // +kubebuilder:validation:MinLength=1 + // +optional + Zone *string `json:"zone,omitempty"` + // nameFilter defines a name filter (works as a LIKE) to apply on the server listing request. + // +kubebuilder:validation:MinLength=1 + // +optional + NameFilter *string `json:"nameFilter,omitempty"` + // tagsFilter defines a tag filter (a server needs to have all defined tags to be listed) to apply on the server listing request. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:items:MinLength=1 + // +listType=set + // +optional + TagsFilter []string `json:"tagsFilter,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // +optional + v1.ProxyConfig `json:",inline"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter + // tlsConfig defines the TLS configuration to connect to the Consul API. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` +} + +// IonosSDConfig configurations allow retrieving scrape targets from IONOS resources. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ionos_sd_config +type IonosSDConfig struct { + // datacenterID defines the unique ID of the IONOS data center. + // +kubebuilder:validation:MinLength=1 + // +required + DataCenterID string `json:"datacenterID"` + // port defines the port to scrape metrics from. If using the public IP address, this must + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port *int32 `json:"port,omitempty"` + // refreshInterval defines the time after which the provided names are refreshed. + // If not set, Prometheus uses its default value. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // authorization defines the header configuration to authenticate against the IONOS. + // Cannot be set at the same time as `oauth2`. + // +required + Authorization v1.SafeAuthorization `json:"authorization"` + v1.ProxyConfig `json:",inline"` + // tlsConfig defines the TLS configuration to connect to the Consul API. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + // enableHTTP2 defines whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter + // oauth2 defines the configuration to use on every scrape request. 
+ // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/validation.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/validation.go new file mode 100644 index 0000000000..f4ad418b79 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/validation.go @@ -0,0 +1,356 @@ +// Copyright 2021 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +func (hc *HTTPConfig) Validate() error { + if hc == nil { + return nil + } + + if (hc.BasicAuth != nil || hc.OAuth2 != nil) && (hc.BearerTokenSecret != nil) { + return fmt.Errorf("at most one of basicAuth, oauth2, bearerTokenSecret must be configured") + } + + if hc.Authorization != nil { + if hc.BearerTokenSecret != nil { + return fmt.Errorf("authorization is not compatible with bearerTokenSecret") + } + + if hc.BasicAuth != nil || hc.OAuth2 != nil { + return fmt.Errorf("at most one of basicAuth, oauth2 & authorization must be configured") + } + + if err := hc.Authorization.Validate(); err != nil { + return err + } + } + + if hc.OAuth2 != nil { + if hc.BasicAuth != nil { + return fmt.Errorf("at most one of basicAuth, oauth2 & authorization must be configured") + } + + if err := hc.OAuth2.Validate(); err != nil { + return err + } + } + + if hc.TLSConfig != nil { + if err := hc.TLSConfig.Validate(); err != nil { + return err + } + } + + if err := hc.ProxyConfig.Validate(); err != nil { + return err + } + + return nil +} + +// Validate the MuteTimeInterval +func (mti MuteTimeInterval) Validate() error { + if mti.Name == "" { + return errors.New("empty name field for mute time interval") + } + + for i, ti := range mti.TimeIntervals { + for _, time := range ti.Times { + if err := time.Validate(); err != nil { + return fmt.Errorf("time range at %d is invalid: %w", i, err) + } + } + for _, weekday := range ti.Weekdays { + if err := weekday.Validate(); err != nil { + return fmt.Errorf("weekday range at %d is invalid: %w", i, err) + } + } + for _, dom := range ti.DaysOfMonth { + if err := dom.Validate(); err != nil { + return fmt.Errorf("mute time interval is invalid - day of month range at %d is invalid: %w", i, err) + } + } + for _, month := range ti.Months { + if err := month.Validate(); err != nil { + return fmt.Errorf("month range at %d is invalid: %w", i, err) + } + } + for _, year := range ti.Years { + if err := year.Validate(); err != nil { + return fmt.Errorf("year range at %d is invalid: %w", i, err) + } + } + } + return nil +} + +// Validate the TimeRange +func (tr TimeRange) Validate() error { + _, err := tr.Parse() + return err +} + +// Parse returns a ParsedRange on valid input or an error if the fields cannot be parsed +// End of the day is represented as 1440. 
+func (tr TimeRange) Parse() (*ParsedRange, error) { + if tr.StartTime == "" || tr.EndTime == "" { + return nil, fmt.Errorf("start and end are required") + } + + start, err := parseTime(string(tr.StartTime)) + if err != nil { + return nil, fmt.Errorf("start time invalid: %w", err) + } + + end, err := parseTime(string(tr.EndTime)) + if err != nil { + return nil, fmt.Errorf("end time invalid: %w", err) + } + + if start >= end { + return nil, fmt.Errorf("start time %d cannot be equal or greater than end time %d", start, end) + } + return &ParsedRange{ + Start: start, + End: end, + }, nil +} + +// Validate the WeekdayRange +func (wr WeekdayRange) Validate() error { + _, err := wr.Parse() + return err +} + +// Parse returns a ParsedRange on valid input or an error if the fields cannot be parsed +// The week starts on Sunday -> 0 +func (wr WeekdayRange) Parse() (*ParsedRange, error) { + startStr, endStr, err := parseRange(string(wr)) + if err != nil { + return nil, err + } + + start, err := Weekday(startStr).Int() + if err != nil { + return nil, fmt.Errorf("failed to parse start day from weekday range: %w", err) + } + + end, err := Weekday(endStr).Int() + if err != nil { + return nil, fmt.Errorf("failed to parse end day from weekday range: %w", err) + } + + if start > end { + return nil, errors.New("start day cannot be before end day") + } + if start < 0 || start > 6 { + return nil, fmt.Errorf("%s is not a valid day of the week: out of range", startStr) + } + if end < 0 || end > 6 { + return nil, fmt.Errorf("%s is not a valid day of the week: out of range", endStr) + } + return &ParsedRange{Start: start, End: end}, nil +} + +// Validate the YearRange +func (yr YearRange) Validate() error { + _, err := yr.Parse() + return err +} + +// Parse returns a ParsedRange on valid input or an error if the fields cannot be parsed +func (yr YearRange) Parse() (*ParsedRange, error) { + startStr, endStr, err := parseRange(string(yr)) + if err != nil { + return nil, err + } + + start, err := strconv.Atoi(startStr) + if err != nil { + return nil, fmt.Errorf("start year cannot be %s parsed: %w", startStr, err) + } + + end, err := strconv.Atoi(endStr) + if err != nil { + return nil, fmt.Errorf("end year cannot be %s parsed: %w", endStr, err) + } + + if start > end { + return nil, fmt.Errorf("end year %d is before start year %d", end, start) + } + return &ParsedRange{Start: start, End: end}, nil +} + +// Int returns an integer, which is the canonical representation +// of the Weekday in upstream types. +// Returns an error if the Weekday is invalid +func (w Weekday) Int() (int, error) { + normaliseWeekday := Weekday(strings.ToLower(string(w))) + + day, found := daysOfWeek[normaliseWeekday] + if !found { + i, err := strconv.Atoi(string(normaliseWeekday)) + if err != nil { + return day, fmt.Errorf("%s is an invalid weekday", w) + } + day = i + } + + return day, nil +} + +// Int validates the Month and returns an integer, which is the canonical representation +// of the Month in upstream types. 
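
The parsing helpers above reduce every range to a pair of integers (the `ParsedRange` type, defined a little further down). A minimal sketch of how they behave, with the expected values worked out from the code: times become minutes elapsed in the day, so 09:00 parses to 540 and 17:30 to 1050, while year ranges split on ":" into plain integers.

```go
package main

import (
	"fmt"

	"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
)

func main() {
	// Time ranges parse to minutes elapsed in the day: 09:00 -> 540, 17:30 -> 1050.
	tr := v1alpha1.TimeRange{StartTime: "09:00", EndTime: "17:30"}
	if parsed, err := tr.Parse(); err == nil {
		fmt.Println(parsed.Start, parsed.End) // 540 1050
	}

	// Year ranges parse the two sides of "start:end" into plain integers.
	yr := v1alpha1.YearRange("2020:2025")
	if parsed, err := yr.Parse(); err == nil {
		fmt.Println(parsed.Start, parsed.End) // 2020 2025
	}
}
```
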
+// Returns an error if the Month is invalid +func (m Month) Int() (int, error) { + normaliseMonth := Month(strings.ToLower(string(m))) + + month, found := months[normaliseMonth] + if !found { + i, err := strconv.Atoi(string(normaliseMonth)) + if err != nil || i < 1 || i > 12 { + return month, fmt.Errorf("%s is an invalid month", m) + } + month = i + } + + return month, nil +} + +// Validate the DayOfMonthRange +func (r DayOfMonthRange) Validate() error { + // Note: Validation is copied from UnmarshalYAML for DayOfMonthRange in alertmanager repo + + // Check beginning <= end accounting for negatives day of month indices as well. + // Months != 31 days can't be addressed here and are clamped, but at least we can catch blatant errors. + if r.Start == 0 || r.Start < -31 || r.Start > 31 { + return fmt.Errorf("%d is not a valid day of the month: out of range", r.Start) + } + if r.End == 0 || r.End < -31 || r.End > 31 { + return fmt.Errorf("%d is not a valid day of the month: out of range", r.End) + } + // Restricting here prevents errors where begin > end in longer months but not shorter months. + if r.Start < 0 && r.End > 0 { + return fmt.Errorf("end day must be negative if start day is negative") + } + // Check begin <= end. We can't know this for sure when using negative indices, + // but we can prevent cases where its always invalid (using 28 day minimum length). + checkBegin := r.Start + checkEnd := r.End + if r.Start < 0 { + checkBegin = 28 + r.Start + } + if r.End < 0 { + checkEnd = 28 + r.End + } + if checkBegin > checkEnd { + return fmt.Errorf("end day %d is always before start day %d", r.End, r.Start) + } + return nil +} + +// Validate the month range +func (mr MonthRange) Validate() error { + _, err := mr.Parse() + return err +} + +// Parse returns a ParsedMonthRange or error on invalid input +func (mr MonthRange) Parse() (*ParsedRange, error) { + startStr, endStr, err := parseRange(string(mr)) + if err != nil { + return nil, err + } + + start, err := Month(startStr).Int() + if err != nil { + return nil, fmt.Errorf("failed to parse start month from month range: %w", err) + } + + end, err := Month(endStr).Int() + if err != nil { + return nil, fmt.Errorf("failed to parse start month from month range: %w", err) + } + + if start > end { + return nil, fmt.Errorf("end month %s is before start month %s", endStr, startStr) + } + return &ParsedRange{ + Start: start, + End: end, + }, nil +} + +// ParsedRange is an integer representation of a range +// +kubebuilder:object:generate:=false +type ParsedRange struct { + // start defines the beginning of the range + // +optional + Start int `json:"start,omitempty"` + // end defines the end of the range + // +optional + End int `json:"end,omitempty"` +} + +var ( + validTime = "^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$)" + validTimeRE = regexp.MustCompile(validTime) +) + +// Converts a string of the form "HH:MM" into the number of minutes elapsed in the day. 
+func parseTime(in string) (mins int, err error) { + if !validTimeRE.MatchString(in) { + return 0, fmt.Errorf("couldn't parse timestamp %s, invalid format", in) + } + timestampComponents := strings.Split(in, ":") + if len(timestampComponents) != 2 { + return 0, fmt.Errorf("invalid timestamp format: %s", in) + } + timeStampHours, err := strconv.Atoi(timestampComponents[0]) + if err != nil { + return 0, err + } + timeStampMinutes, err := strconv.Atoi(timestampComponents[1]) + if err != nil { + return 0, err + } + if timeStampHours < 0 || timeStampHours > 24 || timeStampMinutes < 0 || timeStampMinutes > 60 { + return 0, fmt.Errorf("timestamp %s out of range", in) + } + // Timestamps are stored as minutes elapsed in the day, so multiply hours by 60. + mins = timeStampHours*60 + timeStampMinutes + return mins, nil +} + +// parseRange parses a valid range string into parts +func parseRange(in string) (start, end string, err error) { + if !strings.ContainsRune(in, ':') { + return in, in, nil + } + + parts := strings.Split(string(in), ":") + if len(parts) != 2 { + return start, end, fmt.Errorf("invalid range provided %s", in) + } + return parts[0], parts[1], nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..8c96080826 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,3531 @@ +//go:build !ignore_autogenerated + +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerConfig) DeepCopyInto(out *AlertmanagerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerConfig. +func (in *AlertmanagerConfig) DeepCopy() *AlertmanagerConfig { + if in == nil { + return nil + } + out := new(AlertmanagerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
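The validation helpers in this vendored file are easiest to see end to end: MuteTimeInterval.Validate fans out to the per-field Validate/Parse methods, and TimeRange.Parse reduces an "HH:MM" pair to minutes since midnight via parseTime. The following is only an illustrative sketch of how a caller might exercise them; it is not part of the vendored diff, and the interval values ("weekday-working-hours", "09:00", "monday:friday") are made up for the example.

    package main

    import (
        "fmt"

        "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
    )

    func main() {
        // Hypothetical mute window: weekday working hours. Only the types and
        // methods come from the vendored package; the values are illustrative.
        mti := v1alpha1.MuteTimeInterval{
            Name: "weekday-working-hours",
            TimeIntervals: []v1alpha1.TimeInterval{{
                Times:    []v1alpha1.TimeRange{{StartTime: "09:00", EndTime: "17:00"}},
                Weekdays: []v1alpha1.WeekdayRange{"monday:friday"},
            }},
        }
        if err := mti.Validate(); err != nil {
            fmt.Println("invalid mute time interval:", err)
            return
        }

        // TimeRange.Parse returns minutes since midnight: 540 and 1020 here.
        if pr, err := (v1alpha1.TimeRange{StartTime: "09:00", EndTime: "17:00"}).Parse(); err == nil {
            fmt.Println(pr.Start, pr.End)
        }
    }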
+func (in *AlertmanagerConfigList) DeepCopyInto(out *AlertmanagerConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AlertmanagerConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerConfigList. +func (in *AlertmanagerConfigList) DeepCopy() *AlertmanagerConfigList { + if in == nil { + return nil + } + out := new(AlertmanagerConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerConfigSpec) DeepCopyInto(out *AlertmanagerConfigSpec) { + *out = *in + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = new(Route) + (*in).DeepCopyInto(*out) + } + if in.Receivers != nil { + in, out := &in.Receivers, &out.Receivers + *out = make([]Receiver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InhibitRules != nil { + in, out := &in.InhibitRules, &out.InhibitRules + *out = make([]InhibitRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MuteTimeIntervals != nil { + in, out := &in.MuteTimeIntervals, &out.MuteTimeIntervals + *out = make([]MuteTimeInterval, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerConfigSpec. +func (in *AlertmanagerConfigSpec) DeepCopy() *AlertmanagerConfigSpec { + if in == nil { + return nil + } + out := new(AlertmanagerConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachMetadata) DeepCopyInto(out *AttachMetadata) { + *out = *in + if in.Node != nil { + in, out := &in.Node, &out.Node + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachMetadata. +func (in *AttachMetadata) DeepCopy() *AttachMetadata { + if in == nil { + return nil + } + out := new(AttachMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
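These generated helpers exist so callers can copy API objects whose fields are pointers or slices without aliasing the original. A minimal sketch of that behavior, using AttachMetadata from this file (the values are illustrative; only the type and its DeepCopy method come from the vendored code):

    package main

    import (
        "fmt"

        "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
    )

    func main() {
        node := true
        orig := v1alpha1.AttachMetadata{Node: &node}

        // DeepCopy allocates a fresh *bool for the copy's Node field,
        // so writes through the copy do not touch the original.
        cp := orig.DeepCopy()
        *cp.Node = false

        fmt.Println(*orig.Node, *cp.Node) // true false
    }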
+func (in *AzureSDConfig) DeepCopyInto(out *AzureSDConfig) { + *out = *in + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.AuthenticationMethod != nil { + in, out := &in.AuthenticationMethod, &out.AuthenticationMethod + *out = new(AuthenticationMethodType) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecret != nil { + in, out := &in.ClientSecret, &out.ClientSecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroup != nil { + in, out := &in.ResourceGroup, &out.ResourceGroup + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSDConfig. +func (in *AzureSDConfig) DeepCopy() *AzureSDConfig { + if in == nil { + return nil + } + out := new(AzureSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsulSDConfig) DeepCopyInto(out *ConsulSDConfig) { + *out = *in + if in.PathPrefix != nil { + in, out := &in.PathPrefix, &out.PathPrefix + *out = new(string) + **out = **in + } + if in.TokenRef != nil { + in, out := &in.TokenRef, &out.TokenRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.Datacenter != nil { + in, out := &in.Datacenter, &out.Datacenter + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + *out = new(string) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(monitoringv1.Scheme) + **out = **in + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TagSeparator != nil { + in, out := &in.TagSeparator, &out.TagSeparator + *out = new(string) + **out = **in + } + if in.NodeMeta != nil { + in, out := &in.NodeMeta, &out.NodeMeta + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.AllowStale != nil { + in, out := &in.AllowStale, &out.AllowStale + *out = new(bool) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsulSDConfig. +func (in *ConsulSDConfig) DeepCopy() *ConsulSDConfig { + if in == nil { + return nil + } + out := new(ConsulSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSSDConfig) DeepCopyInto(out *DNSSDConfig) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(DNSRecordType) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSDConfig. 
+func (in *DNSSDConfig) DeepCopy() *DNSSDConfig { + if in == nil { + return nil + } + out := new(DNSSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DayOfMonthRange) DeepCopyInto(out *DayOfMonthRange) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DayOfMonthRange. +func (in *DayOfMonthRange) DeepCopy() *DayOfMonthRange { + if in == nil { + return nil + } + out := new(DayOfMonthRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DigitalOceanSDConfig) DeepCopyInto(out *DigitalOceanSDConfig) { + *out = *in + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DigitalOceanSDConfig. +func (in *DigitalOceanSDConfig) DeepCopy() *DigitalOceanSDConfig { + if in == nil { + return nil + } + out := new(DigitalOceanSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiscordConfig) DeepCopyInto(out *DiscordConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + in.APIURL.DeepCopyInto(&out.APIURL) + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.AvatarURL != nil { + in, out := &in.AvatarURL, &out.AvatarURL + *out = new(URL) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiscordConfig. +func (in *DiscordConfig) DeepCopy() *DiscordConfig { + if in == nil { + return nil + } + out := new(DiscordConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerSDConfig) DeepCopyInto(out *DockerSDConfig) { + *out = *in + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.HostNetworkingHost != nil { + in, out := &in.HostNetworkingHost, &out.HostNetworkingHost + *out = new(string) + **out = **in + } + if in.MatchFirstNetwork != nil { + in, out := &in.MatchFirstNetwork, &out.MatchFirstNetwork + *out = new(bool) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make(Filters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerSDConfig. +func (in *DockerSDConfig) DeepCopy() *DockerSDConfig { + if in == nil { + return nil + } + out := new(DockerSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerSwarmSDConfig) DeepCopyInto(out *DockerSwarmSDConfig) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make(Filters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerSwarmSDConfig. 
+func (in *DockerSwarmSDConfig) DeepCopy() *DockerSwarmSDConfig { + if in == nil { + return nil + } + out := new(DockerSwarmSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EC2SDConfig) DeepCopyInto(out *EC2SDConfig) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.AccessKey != nil { + in, out := &in.AccessKey, &out.AccessKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.SecretKey != nil { + in, out := &in.SecretKey, &out.SecretKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make(Filters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2SDConfig. +func (in *EC2SDConfig) DeepCopy() *EC2SDConfig { + if in == nil { + return nil + } + out := new(EC2SDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailConfig) DeepCopyInto(out *EmailConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.AuthPassword != nil { + in, out := &in.AuthPassword, &out.AuthPassword + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.AuthSecret != nil { + in, out := &in.AuthSecret, &out.AuthSecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]KeyValue, len(*in)) + copy(*out, *in) + } + if in.HTML != nil { + in, out := &in.HTML, &out.HTML + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } + if in.RequireTLS != nil { + in, out := &in.RequireTLS, &out.RequireTLS + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailConfig. +func (in *EmailConfig) DeepCopy() *EmailConfig { + if in == nil { + return nil + } + out := new(EmailConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EurekaSDConfig) DeepCopyInto(out *EurekaSDConfig) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EurekaSDConfig. +func (in *EurekaSDConfig) DeepCopy() *EurekaSDConfig { + if in == nil { + return nil + } + out := new(EurekaSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileSDConfig) DeepCopyInto(out *FileSDConfig) { + *out = *in + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]SDFile, len(*in)) + copy(*out, *in) + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSDConfig. +func (in *FileSDConfig) DeepCopy() *FileSDConfig { + if in == nil { + return nil + } + out := new(FileSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Filter) DeepCopyInto(out *Filter) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. +func (in *Filter) DeepCopy() *Filter { + if in == nil { + return nil + } + out := new(Filter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Filters) DeepCopyInto(out *Filters) { + { + in := &in + *out = make(Filters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filters. +func (in Filters) DeepCopy() Filters { + if in == nil { + return nil + } + out := new(Filters) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GCESDConfig) DeepCopyInto(out *GCESDConfig) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.TagSeparator != nil { + in, out := &in.TagSeparator, &out.TagSeparator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCESDConfig. +func (in *GCESDConfig) DeepCopy() *GCESDConfig { + if in == nil { + return nil + } + out := new(GCESDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfig) DeepCopyInto(out *HTTPConfig) { + *out = *in + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + if in.BearerTokenSecret != nil { + in, out := &in.BearerTokenSecret, &out.BearerTokenSecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.ProxyURLOriginal != nil { + in, out := &in.ProxyURLOriginal, &out.ProxyURLOriginal + *out = new(string) + **out = **in + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfig. +func (in *HTTPConfig) DeepCopy() *HTTPConfig { + if in == nil { + return nil + } + out := new(HTTPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPSDConfig) DeepCopyInto(out *HTTPSDConfig) { + *out = *in + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSDConfig. +func (in *HTTPSDConfig) DeepCopy() *HTTPSDConfig { + if in == nil { + return nil + } + out := new(HTTPSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HetznerSDConfig) DeepCopyInto(out *HetznerSDConfig) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HetznerSDConfig. +func (in *HetznerSDConfig) DeepCopy() *HetznerSDConfig { + if in == nil { + return nil + } + out := new(HetznerSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InhibitRule) DeepCopyInto(out *InhibitRule) { + *out = *in + if in.TargetMatch != nil { + in, out := &in.TargetMatch, &out.TargetMatch + *out = make([]Matcher, len(*in)) + copy(*out, *in) + } + if in.SourceMatch != nil { + in, out := &in.SourceMatch, &out.SourceMatch + *out = make([]Matcher, len(*in)) + copy(*out, *in) + } + if in.Equal != nil { + in, out := &in.Equal, &out.Equal + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InhibitRule. +func (in *InhibitRule) DeepCopy() *InhibitRule { + if in == nil { + return nil + } + out := new(InhibitRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IonosSDConfig) DeepCopyInto(out *IonosSDConfig) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + in.Authorization.DeepCopyInto(&out.Authorization) + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IonosSDConfig. +func (in *IonosSDConfig) DeepCopy() *IonosSDConfig { + if in == nil { + return nil + } + out := new(IonosSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8SSelectorConfig) DeepCopyInto(out *K8SSelectorConfig) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8SSelectorConfig. +func (in *K8SSelectorConfig) DeepCopy() *K8SSelectorConfig { + if in == nil { + return nil + } + out := new(K8SSelectorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyValue) DeepCopyInto(out *KeyValue) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyValue. +func (in *KeyValue) DeepCopy() *KeyValue { + if in == nil { + return nil + } + out := new(KeyValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesSDConfig) DeepCopyInto(out *KubernetesSDConfig) { + *out = *in + if in.APIServer != nil { + in, out := &in.APIServer, &out.APIServer + *out = new(string) + **out = **in + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = new(NamespaceDiscovery) + (*in).DeepCopyInto(*out) + } + if in.AttachMetadata != nil { + in, out := &in.AttachMetadata, &out.AttachMetadata + *out = new(AttachMetadata) + (*in).DeepCopyInto(*out) + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]K8SSelectorConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSDConfig. +func (in *KubernetesSDConfig) DeepCopy() *KubernetesSDConfig { + if in == nil { + return nil + } + out := new(KubernetesSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KumaSDConfig) DeepCopyInto(out *KumaSDConfig) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.FetchTimeout != nil { + in, out := &in.FetchTimeout, &out.FetchTimeout + *out = new(monitoringv1.Duration) + **out = **in + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KumaSDConfig. 
+func (in *KumaSDConfig) DeepCopy() *KumaSDConfig { + if in == nil { + return nil + } + out := new(KumaSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LightSailSDConfig) DeepCopyInto(out *LightSailSDConfig) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.AccessKey != nil { + in, out := &in.AccessKey, &out.AccessKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.SecretKey != nil { + in, out := &in.SecretKey, &out.SecretKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LightSailSDConfig. +func (in *LightSailSDConfig) DeepCopy() *LightSailSDConfig { + if in == nil { + return nil + } + out := new(LightSailSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinodeSDConfig) DeepCopyInto(out *LinodeSDConfig) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.TagSeparator != nil { + in, out := &in.TagSeparator, &out.TagSeparator + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeSDConfig. +func (in *LinodeSDConfig) DeepCopy() *LinodeSDConfig { + if in == nil { + return nil + } + out := new(LinodeSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSTeamsConfig) DeepCopyInto(out *MSTeamsConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + in.WebhookURL.DeepCopyInto(&out.WebhookURL) + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSTeamsConfig. +func (in *MSTeamsConfig) DeepCopy() *MSTeamsConfig { + if in == nil { + return nil + } + out := new(MSTeamsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSTeamsV2Config) DeepCopyInto(out *MSTeamsV2Config) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.WebhookURL != nil { + in, out := &in.WebhookURL, &out.WebhookURL + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSTeamsV2Config. 
+func (in *MSTeamsV2Config) DeepCopy() *MSTeamsV2Config { + if in == nil { + return nil + } + out := new(MSTeamsV2Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Matcher) DeepCopyInto(out *Matcher) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Matcher. +func (in *Matcher) DeepCopy() *Matcher { + if in == nil { + return nil + } + out := new(Matcher) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MuteTimeInterval) DeepCopyInto(out *MuteTimeInterval) { + *out = *in + if in.TimeIntervals != nil { + in, out := &in.TimeIntervals, &out.TimeIntervals + *out = make([]TimeInterval, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MuteTimeInterval. +func (in *MuteTimeInterval) DeepCopy() *MuteTimeInterval { + if in == nil { + return nil + } + out := new(MuteTimeInterval) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceDiscovery) DeepCopyInto(out *NamespaceDiscovery) { + *out = *in + if in.IncludeOwnNamespace != nil { + in, out := &in.IncludeOwnNamespace, &out.IncludeOwnNamespace + *out = new(bool) + **out = **in + } + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceDiscovery. +func (in *NamespaceDiscovery) DeepCopy() *NamespaceDiscovery { + if in == nil { + return nil + } + out := new(NamespaceDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NomadSDConfig) DeepCopyInto(out *NomadSDConfig) { + *out = *in + if in.AllowStale != nil { + in, out := &in.AllowStale, &out.AllowStale + *out = new(bool) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.TagSeparator != nil { + in, out := &in.TagSeparator, &out.TagSeparator + *out = new(string) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NomadSDConfig. +func (in *NomadSDConfig) DeepCopy() *NomadSDConfig { + if in == nil { + return nil + } + out := new(NomadSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OVHCloudSDConfig) DeepCopyInto(out *OVHCloudSDConfig) { + *out = *in + in.ApplicationSecret.DeepCopyInto(&out.ApplicationSecret) + in.ConsumerKey.DeepCopyInto(&out.ConsumerKey) + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVHCloudSDConfig. +func (in *OVHCloudSDConfig) DeepCopy() *OVHCloudSDConfig { + if in == nil { + return nil + } + out := new(OVHCloudSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackSDConfig) DeepCopyInto(out *OpenStackSDConfig) { + *out = *in + if in.IdentityEndpoint != nil { + in, out := &in.IdentityEndpoint, &out.IdentityEndpoint + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.UserID != nil { + in, out := &in.UserID, &out.UserID + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.ProjectName != nil { + in, out := &in.ProjectName, &out.ProjectName + *out = new(string) + **out = **in + } + if in.ProjectID != nil { + in, out := &in.ProjectID, &out.ProjectID + *out = new(string) + **out = **in + } + if in.ApplicationCredentialName != nil { + in, out := &in.ApplicationCredentialName, &out.ApplicationCredentialName + *out = new(string) + **out = **in + } + if in.ApplicationCredentialID != nil { + in, out := &in.ApplicationCredentialID, &out.ApplicationCredentialID + *out = new(string) + **out = **in + } + if in.ApplicationCredentialSecret != nil { + in, out := &in.ApplicationCredentialSecret, &out.ApplicationCredentialSecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.AllTenants != nil { + in, out := &in.AllTenants, &out.AllTenants + *out = new(bool) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.Availability != nil { + in, out := &in.Availability, &out.Availability + *out = new(string) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackSDConfig. +func (in *OpenStackSDConfig) DeepCopy() *OpenStackSDConfig { + if in == nil { + return nil + } + out := new(OpenStackSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpsGenieConfig) DeepCopyInto(out *OpsGenieConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APIKey != nil { + in, out := &in.APIKey, &out.APIKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.UpdateAlerts != nil { + in, out := &in.UpdateAlerts, &out.UpdateAlerts + *out = new(bool) + **out = **in + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = make([]KeyValue, len(*in)) + copy(*out, *in) + } + if in.Responders != nil { + in, out := &in.Responders, &out.Responders + *out = make([]OpsGenieConfigResponder, len(*in)) + copy(*out, *in) + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpsGenieConfig. +func (in *OpsGenieConfig) DeepCopy() *OpsGenieConfig { + if in == nil { + return nil + } + out := new(OpsGenieConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpsGenieConfigResponder) DeepCopyInto(out *OpsGenieConfigResponder) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpsGenieConfigResponder. +func (in *OpsGenieConfigResponder) DeepCopy() *OpsGenieConfigResponder { + if in == nil { + return nil + } + out := new(OpsGenieConfigResponder) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PagerDutyConfig) DeepCopyInto(out *PagerDutyConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.RoutingKey != nil { + in, out := &in.RoutingKey, &out.RoutingKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ServiceKey != nil { + in, out := &in.ServiceKey, &out.ServiceKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(URL) + **out = **in + } + if in.Client != nil { + in, out := &in.Client, &out.Client + *out = new(string) + **out = **in + } + if in.ClientURL != nil { + in, out := &in.ClientURL, &out.ClientURL + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(string) + **out = **in + } + if in.Class != nil { + in, out := &in.Class, &out.Class + *out = new(string) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = new(string) + **out = **in + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = make([]KeyValue, len(*in)) + copy(*out, *in) + } + if in.PagerDutyImageConfigs != nil { + in, out := &in.PagerDutyImageConfigs, &out.PagerDutyImageConfigs + *out = make([]PagerDutyImageConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PagerDutyLinkConfigs != nil { + in, out := &in.PagerDutyLinkConfigs, &out.PagerDutyLinkConfigs + *out = make([]PagerDutyLinkConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(monitoringv1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PagerDutyConfig. +func (in *PagerDutyConfig) DeepCopy() *PagerDutyConfig { + if in == nil { + return nil + } + out := new(PagerDutyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PagerDutyImageConfig) DeepCopyInto(out *PagerDutyImageConfig) { + *out = *in + if in.Src != nil { + in, out := &in.Src, &out.Src + *out = new(string) + **out = **in + } + if in.Href != nil { + in, out := &in.Href, &out.Href + *out = new(string) + **out = **in + } + if in.Alt != nil { + in, out := &in.Alt, &out.Alt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PagerDutyImageConfig. +func (in *PagerDutyImageConfig) DeepCopy() *PagerDutyImageConfig { + if in == nil { + return nil + } + out := new(PagerDutyImageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PagerDutyLinkConfig) DeepCopyInto(out *PagerDutyLinkConfig) { + *out = *in + if in.Href != nil { + in, out := &in.Href, &out.Href + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PagerDutyLinkConfig. +func (in *PagerDutyLinkConfig) DeepCopy() *PagerDutyLinkConfig { + if in == nil { + return nil + } + out := new(PagerDutyLinkConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusAgent) DeepCopyInto(out *PrometheusAgent) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusAgent. +func (in *PrometheusAgent) DeepCopy() *PrometheusAgent { + if in == nil { + return nil + } + out := new(PrometheusAgent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusAgentList) DeepCopyInto(out *PrometheusAgentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PrometheusAgent, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusAgentList. +func (in *PrometheusAgentList) DeepCopy() *PrometheusAgentList { + if in == nil { + return nil + } + out := new(PrometheusAgentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusAgentSpec) DeepCopyInto(out *PrometheusAgentSpec) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(PrometheusAgentMode) + **out = **in + } + in.CommonPrometheusFields.DeepCopyInto(&out.CommonPrometheusFields) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusAgentSpec. +func (in *PrometheusAgentSpec) DeepCopy() *PrometheusAgentSpec { + if in == nil { + return nil + } + out := new(PrometheusAgentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PuppetDBSDConfig) DeepCopyInto(out *PuppetDBSDConfig) { + *out = *in + if in.IncludeParameters != nil { + in, out := &in.IncludeParameters, &out.IncludeParameters + *out = new(bool) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PuppetDBSDConfig. +func (in *PuppetDBSDConfig) DeepCopy() *PuppetDBSDConfig { + if in == nil { + return nil + } + out := new(PuppetDBSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PushoverConfig) DeepCopyInto(out *PushoverConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.UserKey != nil { + in, out := &in.UserKey, &out.UserKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.UserKeyFile != nil { + in, out := &in.UserKeyFile, &out.UserKeyFile + *out = new(string) + **out = **in + } + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.TokenFile != nil { + in, out := &in.TokenFile, &out.TokenFile + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + if in.URLTitle != nil { + in, out := &in.URLTitle, &out.URLTitle + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Device != nil { + in, out := &in.Device, &out.Device + *out = new(string) + **out = **in + } + if in.Sound != nil { + in, out := &in.Sound, &out.Sound + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.Retry != nil { + in, out := &in.Retry, &out.Retry + *out = new(string) + **out = **in + } + if in.Expire != nil { + in, out := &in.Expire, &out.Expire + *out = new(string) + **out = **in + } + if in.HTML != nil { + in, out := &in.HTML, &out.HTML + *out = new(bool) + **out = **in + } + if in.Monospace != nil { + in, out := &in.Monospace, &out.Monospace + *out = new(bool) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, 
&out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushoverConfig. +func (in *PushoverConfig) DeepCopy() *PushoverConfig { + if in == nil { + return nil + } + out := new(PushoverConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Receiver) DeepCopyInto(out *Receiver) { + *out = *in + if in.OpsGenieConfigs != nil { + in, out := &in.OpsGenieConfigs, &out.OpsGenieConfigs + *out = make([]OpsGenieConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PagerDutyConfigs != nil { + in, out := &in.PagerDutyConfigs, &out.PagerDutyConfigs + *out = make([]PagerDutyConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DiscordConfigs != nil { + in, out := &in.DiscordConfigs, &out.DiscordConfigs + *out = make([]DiscordConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SlackConfigs != nil { + in, out := &in.SlackConfigs, &out.SlackConfigs + *out = make([]SlackConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WebhookConfigs != nil { + in, out := &in.WebhookConfigs, &out.WebhookConfigs + *out = make([]WebhookConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WeChatConfigs != nil { + in, out := &in.WeChatConfigs, &out.WeChatConfigs + *out = make([]WeChatConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EmailConfigs != nil { + in, out := &in.EmailConfigs, &out.EmailConfigs + *out = make([]EmailConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VictorOpsConfigs != nil { + in, out := &in.VictorOpsConfigs, &out.VictorOpsConfigs + *out = make([]VictorOpsConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PushoverConfigs != nil { + in, out := &in.PushoverConfigs, &out.PushoverConfigs + *out = make([]PushoverConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SNSConfigs != nil { + in, out := &in.SNSConfigs, &out.SNSConfigs + *out = make([]SNSConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TelegramConfigs != nil { + in, out := &in.TelegramConfigs, &out.TelegramConfigs + *out = make([]TelegramConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WebexConfigs != nil { + in, out := &in.WebexConfigs, &out.WebexConfigs + *out = make([]WebexConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MSTeamsConfigs != nil { + in, out := &in.MSTeamsConfigs, &out.MSTeamsConfigs + *out = make([]MSTeamsConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MSTeamsV2Configs != nil { + in, out := &in.MSTeamsV2Configs, &out.MSTeamsV2Configs + *out = make([]MSTeamsV2Config, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RocketChatConfigs != nil { + in, out := &in.RocketChatConfigs, &out.RocketChatConfigs + *out = make([]RocketChatConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Receiver. 
+func (in *Receiver) DeepCopy() *Receiver { + if in == nil { + return nil + } + out := new(Receiver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RocketChatActionConfig) DeepCopyInto(out *RocketChatActionConfig) { + *out = *in + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Msg != nil { + in, out := &in.Msg, &out.Msg + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RocketChatActionConfig. +func (in *RocketChatActionConfig) DeepCopy() *RocketChatActionConfig { + if in == nil { + return nil + } + out := new(RocketChatActionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RocketChatConfig) DeepCopyInto(out *RocketChatConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.Channel != nil { + in, out := &in.Channel, &out.Channel + *out = new(string) + **out = **in + } + in.Token.DeepCopyInto(&out.Token) + in.TokenID.DeepCopyInto(&out.TokenID) + if in.Color != nil { + in, out := &in.Color, &out.Color + *out = new(string) + **out = **in + } + if in.Emoji != nil { + in, out := &in.Emoji, &out.Emoji + *out = new(string) + **out = **in + } + if in.IconURL != nil { + in, out := &in.IconURL, &out.IconURL + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.TitleLink != nil { + in, out := &in.TitleLink, &out.TitleLink + *out = new(string) + **out = **in + } + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]RocketChatFieldConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShortFields != nil { + in, out := &in.ShortFields, &out.ShortFields + *out = new(bool) + **out = **in + } + if in.ImageURL != nil { + in, out := &in.ImageURL, &out.ImageURL + *out = new(string) + **out = **in + } + if in.ThumbURL != nil { + in, out := &in.ThumbURL, &out.ThumbURL + *out = new(string) + **out = **in + } + if in.LinkNames != nil { + in, out := &in.LinkNames, &out.LinkNames + *out = new(bool) + **out = **in + } + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]RocketChatActionConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RocketChatConfig. +func (in *RocketChatConfig) DeepCopy() *RocketChatConfig { + if in == nil { + return nil + } + out := new(RocketChatConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RocketChatFieldConfig) DeepCopyInto(out *RocketChatFieldConfig) { + *out = *in + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + if in.Short != nil { + in, out := &in.Short, &out.Short + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RocketChatFieldConfig. +func (in *RocketChatFieldConfig) DeepCopy() *RocketChatFieldConfig { + if in == nil { + return nil + } + out := new(RocketChatFieldConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Route) DeepCopyInto(out *Route) { + *out = *in + if in.GroupBy != nil { + in, out := &in.GroupBy, &out.GroupBy + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Matchers != nil { + in, out := &in.Matchers, &out.Matchers + *out = make([]Matcher, len(*in)) + copy(*out, *in) + } + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make([]v1.JSON, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MuteTimeIntervals != nil { + in, out := &in.MuteTimeIntervals, &out.MuteTimeIntervals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ActiveTimeIntervals != nil { + in, out := &in.ActiveTimeIntervals, &out.ActiveTimeIntervals + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route. +func (in *Route) DeepCopy() *Route { + if in == nil { + return nil + } + out := new(Route) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SNSConfig) DeepCopyInto(out *SNSConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.Sigv4 != nil { + in, out := &in.Sigv4, &out.Sigv4 + *out = new(monitoringv1.Sigv4) + (*in).DeepCopyInto(*out) + } + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SNSConfig. +func (in *SNSConfig) DeepCopy() *SNSConfig { + if in == nil { + return nil + } + out := new(SNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalewaySDConfig) DeepCopyInto(out *ScalewaySDConfig) { + *out = *in + in.SecretKey.DeepCopyInto(&out.SecretKey) + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.ApiURL != nil { + in, out := &in.ApiURL, &out.ApiURL + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } + if in.NameFilter != nil { + in, out := &in.NameFilter, &out.NameFilter + *out = new(string) + **out = **in + } + if in.TagsFilter != nil { + in, out := &in.TagsFilter, &out.TagsFilter + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalewaySDConfig. +func (in *ScalewaySDConfig) DeepCopy() *ScalewaySDConfig { + if in == nil { + return nil + } + out := new(ScalewaySDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScrapeConfig) DeepCopyInto(out *ScrapeConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScrapeConfig. +func (in *ScrapeConfig) DeepCopy() *ScrapeConfig { + if in == nil { + return nil + } + out := new(ScrapeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScrapeConfigList) DeepCopyInto(out *ScrapeConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScrapeConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScrapeConfigList. +func (in *ScrapeConfigList) DeepCopy() *ScrapeConfigList { + if in == nil { + return nil + } + out := new(ScrapeConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScrapeConfigSpec) DeepCopyInto(out *ScrapeConfigSpec) { + *out = *in + if in.JobName != nil { + in, out := &in.JobName, &out.JobName + *out = new(string) + **out = **in + } + if in.StaticConfigs != nil { + in, out := &in.StaticConfigs, &out.StaticConfigs + *out = make([]StaticConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FileSDConfigs != nil { + in, out := &in.FileSDConfigs, &out.FileSDConfigs + *out = make([]FileSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPSDConfigs != nil { + in, out := &in.HTTPSDConfigs, &out.HTTPSDConfigs + *out = make([]HTTPSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KubernetesSDConfigs != nil { + in, out := &in.KubernetesSDConfigs, &out.KubernetesSDConfigs + *out = make([]KubernetesSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConsulSDConfigs != nil { + in, out := &in.ConsulSDConfigs, &out.ConsulSDConfigs + *out = make([]ConsulSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DNSSDConfigs != nil { + in, out := &in.DNSSDConfigs, &out.DNSSDConfigs + *out = make([]DNSSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EC2SDConfigs != nil { + in, out := &in.EC2SDConfigs, &out.EC2SDConfigs + *out = make([]EC2SDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AzureSDConfigs != nil { + in, out := &in.AzureSDConfigs, &out.AzureSDConfigs + *out = make([]AzureSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GCESDConfigs != nil { + in, out := &in.GCESDConfigs, &out.GCESDConfigs + *out = make([]GCESDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OpenStackSDConfigs != nil { + in, out := &in.OpenStackSDConfigs, &out.OpenStackSDConfigs + *out = make([]OpenStackSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DigitalOceanSDConfigs != nil { + in, out := &in.DigitalOceanSDConfigs, &out.DigitalOceanSDConfigs + *out = make([]DigitalOceanSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KumaSDConfigs != nil { + in, out := &in.KumaSDConfigs, &out.KumaSDConfigs + *out = make([]KumaSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EurekaSDConfigs != nil { + in, out := &in.EurekaSDConfigs, &out.EurekaSDConfigs + *out = make([]EurekaSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerSDConfigs != nil { + in, out := &in.DockerSDConfigs, &out.DockerSDConfigs + *out = make([]DockerSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LinodeSDConfigs != nil { + in, out := &in.LinodeSDConfigs, &out.LinodeSDConfigs + *out = make([]LinodeSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HetznerSDConfigs != nil { + in, out := &in.HetznerSDConfigs, &out.HetznerSDConfigs + *out = make([]HetznerSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NomadSDConfigs != nil { + in, out := &in.NomadSDConfigs, &out.NomadSDConfigs + *out = make([]NomadSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerSwarmSDConfigs != nil { + 
in, out := &in.DockerSwarmSDConfigs, &out.DockerSwarmSDConfigs + *out = make([]DockerSwarmSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PuppetDBSDConfigs != nil { + in, out := &in.PuppetDBSDConfigs, &out.PuppetDBSDConfigs + *out = make([]PuppetDBSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LightSailSDConfigs != nil { + in, out := &in.LightSailSDConfigs, &out.LightSailSDConfigs + *out = make([]LightSailSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OVHCloudSDConfigs != nil { + in, out := &in.OVHCloudSDConfigs, &out.OVHCloudSDConfigs + *out = make([]OVHCloudSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScalewaySDConfigs != nil { + in, out := &in.ScalewaySDConfigs, &out.ScalewaySDConfigs + *out = make([]ScalewaySDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IonosSDConfigs != nil { + in, out := &in.IonosSDConfigs, &out.IonosSDConfigs + *out = make([]IonosSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]monitoringv1.RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricsPath != nil { + in, out := &in.MetricsPath, &out.MetricsPath + *out = new(string) + **out = **in + } + if in.ScrapeInterval != nil { + in, out := &in.ScrapeInterval, &out.ScrapeInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.ScrapeTimeout != nil { + in, out := &in.ScrapeTimeout, &out.ScrapeTimeout + *out = new(monitoringv1.Duration) + **out = **in + } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]monitoringv1.ScrapeProtocol, len(*in)) + copy(*out, *in) + } + if in.FallbackScrapeProtocol != nil { + in, out := &in.FallbackScrapeProtocol, &out.FallbackScrapeProtocol + *out = new(monitoringv1.ScrapeProtocol) + **out = **in + } + if in.HonorTimestamps != nil { + in, out := &in.HonorTimestamps, &out.HonorTimestamps + *out = new(bool) + **out = **in + } + if in.TrackTimestampsStaleness != nil { + in, out := &in.TrackTimestampsStaleness, &out.TrackTimestampsStaleness + *out = new(bool) + **out = **in + } + if in.HonorLabels != nil { + in, out := &in.HonorLabels, &out.HonorLabels + *out = new(bool) + **out = **in + } + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(monitoringv1.Scheme) + **out = **in + } + if in.EnableCompression != nil { + in, out := &in.EnableCompression, &out.EnableCompression + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, 
out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.SampleLimit != nil { + in, out := &in.SampleLimit, &out.SampleLimit + *out = new(uint64) + **out = **in + } + if in.TargetLimit != nil { + in, out := &in.TargetLimit, &out.TargetLimit + *out = new(uint64) + **out = **in + } + if in.LabelLimit != nil { + in, out := &in.LabelLimit, &out.LabelLimit + *out = new(uint64) + **out = **in + } + if in.LabelNameLengthLimit != nil { + in, out := &in.LabelNameLengthLimit, &out.LabelNameLengthLimit + *out = new(uint64) + **out = **in + } + if in.LabelValueLengthLimit != nil { + in, out := &in.LabelValueLengthLimit, &out.LabelValueLengthLimit + *out = new(uint64) + **out = **in + } + in.NativeHistogramConfig.DeepCopyInto(&out.NativeHistogramConfig) + if in.KeepDroppedTargets != nil { + in, out := &in.KeepDroppedTargets, &out.KeepDroppedTargets + *out = new(uint64) + **out = **in + } + if in.MetricRelabelConfigs != nil { + in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs + *out = make([]monitoringv1.RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.NameValidationScheme != nil { + in, out := &in.NameValidationScheme, &out.NameValidationScheme + *out = new(monitoringv1.NameValidationSchemeOptions) + **out = **in + } + if in.NameEscapingScheme != nil { + in, out := &in.NameEscapingScheme, &out.NameEscapingScheme + *out = new(monitoringv1.NameEscapingSchemeOptions) + **out = **in + } + if in.ScrapeClassName != nil { + in, out := &in.ScrapeClassName, &out.ScrapeClassName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScrapeConfigSpec. +func (in *ScrapeConfigSpec) DeepCopy() *ScrapeConfigSpec { + if in == nil { + return nil + } + out := new(ScrapeConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SlackAction) DeepCopyInto(out *SlackAction) { + *out = *in + if in.Style != nil { + in, out := &in.Style, &out.Style + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + if in.ConfirmField != nil { + in, out := &in.ConfirmField, &out.ConfirmField + *out = new(SlackConfirmationField) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackAction. +func (in *SlackAction) DeepCopy() *SlackAction { + if in == nil { + return nil + } + out := new(SlackAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SlackConfig) DeepCopyInto(out *SlackConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.Channel != nil { + in, out := &in.Channel, &out.Channel + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.Color != nil { + in, out := &in.Color, &out.Color + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Pretext != nil { + in, out := &in.Pretext, &out.Pretext + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]SlackField, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShortFields != nil { + in, out := &in.ShortFields, &out.ShortFields + *out = new(bool) + **out = **in + } + if in.Footer != nil { + in, out := &in.Footer, &out.Footer + *out = new(string) + **out = **in + } + if in.Fallback != nil { + in, out := &in.Fallback, &out.Fallback + *out = new(string) + **out = **in + } + if in.CallbackID != nil { + in, out := &in.CallbackID, &out.CallbackID + *out = new(string) + **out = **in + } + if in.IconEmoji != nil { + in, out := &in.IconEmoji, &out.IconEmoji + *out = new(string) + **out = **in + } + if in.LinkNames != nil { + in, out := &in.LinkNames, &out.LinkNames + *out = new(bool) + **out = **in + } + if in.MrkdwnIn != nil { + in, out := &in.MrkdwnIn, &out.MrkdwnIn + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]SlackAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(monitoringv1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackConfig. +func (in *SlackConfig) DeepCopy() *SlackConfig { + if in == nil { + return nil + } + out := new(SlackConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SlackConfirmationField) DeepCopyInto(out *SlackConfirmationField) { + *out = *in + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.OkText != nil { + in, out := &in.OkText, &out.OkText + *out = new(string) + **out = **in + } + if in.DismissText != nil { + in, out := &in.DismissText, &out.DismissText + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackConfirmationField. +func (in *SlackConfirmationField) DeepCopy() *SlackConfirmationField { + if in == nil { + return nil + } + out := new(SlackConfirmationField) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SlackField) DeepCopyInto(out *SlackField) { + *out = *in + if in.Short != nil { + in, out := &in.Short, &out.Short + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackField. +func (in *SlackField) DeepCopy() *SlackField { + if in == nil { + return nil + } + out := new(SlackField) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticConfig) DeepCopyInto(out *StaticConfig) { + *out = *in + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]Target, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticConfig. +func (in *StaticConfig) DeepCopy() *StaticConfig { + if in == nil { + return nil + } + out := new(StaticConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TelegramConfig) DeepCopyInto(out *TelegramConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.BotToken != nil { + in, out := &in.BotToken, &out.BotToken + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.BotTokenFile != nil { + in, out := &in.BotTokenFile, &out.BotTokenFile + *out = new(string) + **out = **in + } + if in.MessageThreadID != nil { + in, out := &in.MessageThreadID, &out.MessageThreadID + *out = new(int64) + **out = **in + } + if in.DisableNotifications != nil { + in, out := &in.DisableNotifications, &out.DisableNotifications + *out = new(bool) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelegramConfig. +func (in *TelegramConfig) DeepCopy() *TelegramConfig { + if in == nil { + return nil + } + out := new(TelegramConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeInterval) DeepCopyInto(out *TimeInterval) { + *out = *in + if in.Times != nil { + in, out := &in.Times, &out.Times + *out = make([]TimeRange, len(*in)) + copy(*out, *in) + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]WeekdayRange, len(*in)) + copy(*out, *in) + } + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]DayOfMonthRange, len(*in)) + copy(*out, *in) + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]MonthRange, len(*in)) + copy(*out, *in) + } + if in.Years != nil { + in, out := &in.Years, &out.Years + *out = make([]YearRange, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeInterval. 
+func (in *TimeInterval) DeepCopy() *TimeInterval { + if in == nil { + return nil + } + out := new(TimeInterval) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeRange) DeepCopyInto(out *TimeRange) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeRange. +func (in *TimeRange) DeepCopy() *TimeRange { + if in == nil { + return nil + } + out := new(TimeRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VictorOpsConfig) DeepCopyInto(out *VictorOpsConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APIKey != nil { + in, out := &in.APIKey, &out.APIKey + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.MessageType != nil { + in, out := &in.MessageType, &out.MessageType + *out = new(string) + **out = **in + } + if in.EntityDisplayName != nil { + in, out := &in.EntityDisplayName, &out.EntityDisplayName + *out = new(string) + **out = **in + } + if in.StateMessage != nil { + in, out := &in.StateMessage, &out.StateMessage + *out = new(string) + **out = **in + } + if in.MonitoringTool != nil { + in, out := &in.MonitoringTool, &out.MonitoringTool + *out = new(string) + **out = **in + } + if in.CustomFields != nil { + in, out := &in.CustomFields, &out.CustomFields + *out = make([]KeyValue, len(*in)) + copy(*out, *in) + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VictorOpsConfig. +func (in *VictorOpsConfig) DeepCopy() *VictorOpsConfig { + if in == nil { + return nil + } + out := new(VictorOpsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeChatConfig) DeepCopyInto(out *WeChatConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APISecret != nil { + in, out := &in.APISecret, &out.APISecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeChatConfig. +func (in *WeChatConfig) DeepCopy() *WeChatConfig { + if in == nil { + return nil + } + out := new(WeChatConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebexConfig) DeepCopyInto(out *WebexConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebexConfig. +func (in *WebexConfig) DeepCopy() *WebexConfig { + if in == nil { + return nil + } + out := new(WebexConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookConfig) DeepCopyInto(out *WebhookConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.URLSecret != nil { + in, out := &in.URLSecret, &out.URLSecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(monitoringv1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfig. +func (in *WebhookConfig) DeepCopy() *WebhookConfig { + if in == nil { + return nil + } + out := new(WebhookConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/alertmanager_config_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/alertmanager_config_types.go new file mode 100644 index 0000000000..89e8cf42d6 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/alertmanager_config_types.go @@ -0,0 +1,1643 @@ +// Copyright 2020 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package v1beta1
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"html/template"
+	"regexp"
+	"strings"
+
+	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+
+	v1 "k8s.io/api/core/v1"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/utils/ptr"
+)
+
+const (
+	Version = "v1beta1"
+
+	AlertmanagerConfigKind    = "AlertmanagerConfig"
+	AlertmanagerConfigName    = "alertmanagerconfigs"
+	AlertmanagerConfigKindKey = "alertmanagerconfig"
+)
+
+// +genclient
+// +k8s:openapi-gen=true
+// +kubebuilder:resource:categories="prometheus-operator",shortName="amcfg"
+// +kubebuilder:subresource:status
+
+// The `AlertmanagerConfig` custom resource definition (CRD) defines how `Alertmanager` objects process Prometheus alerts. It allows specifying alert grouping and routing, notification receivers and inhibition rules.
+//
+// `Alertmanager` objects select `AlertmanagerConfig` objects using label and namespace selectors.
+type AlertmanagerConfig struct {
+	// TypeMeta defines the versioned schema of this representation of an object.
+	metav1.TypeMeta `json:",inline"`
+	// metadata defines ObjectMeta as the metadata that all persisted resources must have.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// spec defines the specification of the Alertmanager configuration.
+	// +required
+	Spec AlertmanagerConfigSpec `json:"spec"`
+	// status defines the status subresource. It is under active development and is updated only when the
+	// "StatusForConfigurationResources" feature gate is enabled.
+	//
+	// Most recent observed status of the AlertmanagerConfig. Read-only.
+	// More info:
+	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status monitoringv1.ConfigResourceStatus `json:"status,omitempty,omitzero"`
+}
+
+// AlertmanagerConfigList is a list of AlertmanagerConfig.
+// +k8s:openapi-gen=true
+type AlertmanagerConfigList struct {
+	// TypeMeta defines the versioned schema of this representation of an object.
+	metav1.TypeMeta `json:",inline"`
+	// metadata defines ListMeta as metadata for collection responses.
+	metav1.ListMeta `json:"metadata,omitempty"`
+	// List of AlertmanagerConfig
+	Items []AlertmanagerConfig `json:"items"`
+}
+
+// AlertmanagerConfigSpec is a specification of the desired behavior of the Alertmanager configuration.
+// By definition, the Alertmanager configuration only applies to alerts for which
+// the `namespace` label is equal to the namespace of the AlertmanagerConfig resource.
+type AlertmanagerConfigSpec struct {
+	// route defines the Alertmanager route definition for alerts matching the resource's
+	// namespace. If present, it will be added to the generated Alertmanager
+	// configuration as a first-level route.
+	// +optional
+	Route *Route `json:"route"`
+	// receivers defines the list of receivers.
+	// +optional
+	Receivers []Receiver `json:"receivers"`
+	// inhibitRules defines the list of inhibition rules. The rules will only apply to alerts matching
+	// the resource's namespace.
+	// +optional
+	InhibitRules []InhibitRule `json:"inhibitRules,omitempty"`
+	// timeIntervals defines the list of timeIntervals specifying when the routes should be muted.
+	// +optional
+	TimeIntervals []TimeInterval `json:"timeIntervals,omitempty"`
+}
+
+// Route defines a node in the routing tree.
+type Route struct {
+	// receiver defines the name of the receiver for this route. If not empty, it should be listed in
+	// the `receivers` field.
+	// +optional
+	Receiver string `json:"receiver"`
+	// groupBy defines the list of labels to group by.
+	// Labels must not be repeated (unique list).
+	// Special label "..." (aggregate by all possible labels), if provided, must be the only element in the list.
+	// +optional
+	GroupBy []string `json:"groupBy,omitempty"`
+	// groupWait defines how long to wait before sending the initial notification.
+	// Must match the regular expression `^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$`
+	// Example: "30s"
+	// +optional
+	GroupWait string `json:"groupWait,omitempty"`
+	// groupInterval defines how long to wait before sending an updated notification.
+	// Must match the regular expression `^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$`
+	// Example: "5m"
+	// +optional
+	GroupInterval string `json:"groupInterval,omitempty"`
+	// repeatInterval defines how long to wait before repeating the last notification.
+	// Must match the regular expression `^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$`
+	// Example: "4h"
+	// +optional
+	RepeatInterval string `json:"repeatInterval,omitempty"`
+	// matchers defines the list of matchers that the alert's labels should match. For the first
+	// level route, the operator removes any existing equality and regexp
+	// matcher on the `namespace` label and adds a `namespace: ` matcher.
+	// +optional
+	Matchers []Matcher `json:"matchers,omitempty"`
+	// continue defines the boolean indicating whether an alert should continue matching subsequent
+	// sibling nodes. It will always be overridden to true for the first-level
+	// route by the Prometheus operator.
+	// +optional
+	Continue bool `json:"continue,omitempty"` // nolint:kubeapilinter
+	// routes defines the child routes.
+	// +optional
+	Routes []apiextensionsv1.JSON `json:"routes,omitempty"`
+	// Note: this comment applies to the field definition above but appears
+	// below otherwise it gets included in the generated manifest.
+	// CRD schema doesn't support self-referential types for now (see
+	// https://github.com/kubernetes/kubernetes/issues/62872). We have to use
+	// an alternative type to circumvent the limitation. The downside is that
+	// the Kube API can't validate the data beyond the fact that it is a valid
+	// JSON representation.
+
+	// muteTimeIntervals is a list of MuteTimeInterval names that will mute this route when matched.
+	// +optional
+	MuteTimeIntervals []string `json:"muteTimeIntervals,omitempty"`
+	// activeTimeIntervals is a list of TimeInterval names when this route should be active.
+	// +optional
+	ActiveTimeIntervals []string `json:"activeTimeIntervals,omitempty"`
+}
+
+// ChildRoutes extracts the child routes.
+func (r *Route) ChildRoutes() ([]Route, error) {
+	out := make([]Route, len(r.Routes))
+
+	for i, v := range r.Routes {
+		dec := json.NewDecoder(bytes.NewBuffer(v.Raw))
+		dec.DisallowUnknownFields()
+		if err := dec.Decode(&out[i]); err != nil {
+			return nil, fmt.Errorf("route[%d]: %w", i, err)
+		}
+	}
+
+	return out, nil
+}
+
+// Receiver defines one or more notification integrations.
+type Receiver struct {
+	// name defines the name of the receiver. Must be unique across all items from the list.
+	// +kubebuilder:validation:MinLength=1
+	// +required
+	Name string `json:"name"`
+	// opsgenieConfigs defines the list of OpsGenie configurations.
+	// +optional
+	OpsGenieConfigs []OpsGenieConfig `json:"opsgenieConfigs,omitempty"`
+	// pagerdutyConfigs defines the list of PagerDuty configurations.
+	// +optional
+	PagerDutyConfigs []PagerDutyConfig `json:"pagerdutyConfigs,omitempty"`
+	// discordConfigs defines the list of Discord configurations.
+	// +optional
+	DiscordConfigs []DiscordConfig `json:"discordConfigs,omitempty"`
+	// slackConfigs defines the list of Slack configurations.
+	// +optional
+	SlackConfigs []SlackConfig `json:"slackConfigs,omitempty"`
+	// webhookConfigs defines the list of webhook configurations.
+	// +optional
+	WebhookConfigs []WebhookConfig `json:"webhookConfigs,omitempty"`
+	// wechatConfigs defines the list of WeChat configurations.
+	// +optional
+	WeChatConfigs []WeChatConfig `json:"wechatConfigs,omitempty"`
+	// emailConfigs defines the list of Email configurations.
+	// +optional
+	EmailConfigs []EmailConfig `json:"emailConfigs,omitempty"`
+	// victoropsConfigs defines the list of VictorOps configurations.
+	// +optional
+	VictorOpsConfigs []VictorOpsConfig `json:"victoropsConfigs,omitempty"`
+	// pushoverConfigs defines the list of Pushover configurations.
+	// +optional
+	PushoverConfigs []PushoverConfig `json:"pushoverConfigs,omitempty"`
+	// snsConfigs defines the list of SNS configurations.
+	// +optional
+	SNSConfigs []SNSConfig `json:"snsConfigs,omitempty"`
+	// telegramConfigs defines the list of Telegram configurations.
+	// +optional
+	TelegramConfigs []TelegramConfig `json:"telegramConfigs,omitempty"`
+	// webexConfigs defines the list of Webex configurations.
+	// +optional
+	WebexConfigs []WebexConfig `json:"webexConfigs,omitempty"`
+	// msteamsConfigs defines the list of MSTeams configurations.
+	// It requires Alertmanager >= 0.26.0.
+	// +optional
+	MSTeamsConfigs []MSTeamsConfig `json:"msteamsConfigs,omitempty"`
+	// msteamsv2Configs defines the list of MSTeamsV2 configurations.
+	// It requires Alertmanager >= 0.28.0.
+	// +optional
+	MSTeamsV2Configs []MSTeamsV2Config `json:"msteamsv2Configs,omitempty"`
+	// rocketchatConfigs defines the list of RocketChat configurations.
+	// It requires Alertmanager >= 0.28.0.
+	// +optional
+	RocketChatConfigs []RocketChatConfig `json:"rocketchatConfigs,omitempty"`
+}
+
+// PagerDutyConfig configures notifications via PagerDuty.
+// See https://prometheus.io/docs/alerting/latest/configuration/#pagerduty_config
+type PagerDutyConfig struct {
+	// sendResolved defines whether or not to notify about resolved alerts.
+	// +optional
+	SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter
+	// routingKey defines the secret's key that contains the PagerDuty integration key (when using
+	// Events API v2). Either this field or `serviceKey` needs to be defined.
+	// The secret needs to be in the same namespace as the AlertmanagerConfig
+	// object and accessible by the Prometheus Operator.
+	// +optional
+	RoutingKey *SecretKeySelector `json:"routingKey,omitempty"`
+	// serviceKey defines the secret's key that contains the PagerDuty service key (when using
+	// integration type "Prometheus"). Either this field or `routingKey` needs to
+	// be defined.
+	// The secret needs to be in the same namespace as the AlertmanagerConfig
+	// object and accessible by the Prometheus Operator.
+ // +optional + ServiceKey *SecretKeySelector `json:"serviceKey,omitempty"` + // url defines the URL to send requests to. + // +optional + URL *URL `json:"url,omitempty"` + // client defines the client identification. + // +kubebuilder:validation:MinLength=1 + // +optional + Client *string `json:"client,omitempty"` + // clientURL defines the backlink to the sender of notification. + // +optional + ClientURL *string `json:"clientURL,omitempty"` + // description of the incident. + // +kubebuilder:validation:MinLength=1 + // +optional + Description *string `json:"description,omitempty"` + // severity of the incident. + // +kubebuilder:validation:MinLength=1 + // +optional + Severity *string `json:"severity,omitempty"` + // class defines the class/type of the event. + // +kubebuilder:validation:MinLength=1 + // +optional + Class *string `json:"class,omitempty"` + // group defines a cluster or grouping of sources. + // +kubebuilder:validation:MinLength=1 + // +optional + Group *string `json:"group,omitempty"` + // component defines the part or component of the affected system that is broken. + // +kubebuilder:validation:MinLength=1 + // +optional + Component *string `json:"component,omitempty"` + // details defines the arbitrary key/value pairs that provide further detail about the incident. + // +listType=atomic + // +optional + Details []KeyValue `json:"details,omitempty"` + // pagerDutyImageConfigs defines a list of image details to attach that provide further detail about an incident. + // +listType=atomic + // +optional + PagerDutyImageConfigs []PagerDutyImageConfig `json:"pagerDutyImageConfigs,omitempty"` + // pagerDutyLinkConfigs defines a list of link details to attach that provide further detail about an incident. + // +listType=atomic + // +optional + PagerDutyLinkConfigs []PagerDutyLinkConfig `json:"pagerDutyLinkConfigs,omitempty"` + // httpConfig defines the HTTP client configuration. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` + // source defines the unique location of the affected system. + // +kubebuilder:validation:MinLength=1 + // +optional + Source *string `json:"source,omitempty"` + // timeout is the maximum time allowed to invoke the pagerduty + // It requires Alertmanager >= v0.30.0. + // +optional + Timeout *monitoringv1.Duration `json:"timeout,omitempty"` +} + +// PagerDutyImageConfig attaches images to an incident +type PagerDutyImageConfig struct { + // src of the image being attached to the incident + // +kubebuilder:validation:MinLength=1 + // +optional + Src *string `json:"src,omitempty"` + // href defines the optional URL; makes the image a clickable link. + // +optional + Href *string `json:"href,omitempty"` + // alt is the optional alternative text for the image. + // +kubebuilder:validation:MinLength=1 + // +optional + Alt *string `json:"alt,omitempty"` +} + +// PagerDutyLinkConfig attaches text links to an incident +type PagerDutyLinkConfig struct { + // href defines the URL of the link to be attached + // +optional + Href *string `json:"href,omitempty"` + // alt defines the text that describes the purpose of the link, and can be used as the link's text. + // +kubebuilder:validation:MinLength=1 + // +optional + Text *string `json:"alt,omitempty"` +} + +// DiscordConfig configures notifications via Discord. +// See https://prometheus.io/docs/alerting/latest/configuration/#discord_config +type DiscordConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. 
+ // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiURL defines the secret's key that contains the Discord webhook URL. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +required + APIURL v1.SecretKeySelector `json:"apiURL,omitempty"` + // title defines the template of the message's title. + // +optional + Title *string `json:"title,omitempty"` + // message defines the template of the message's body. + // +optional + Message *string `json:"message,omitempty"` + // content defines the template of the content's body. + // +optional + // +kubebuilder:validation:MinLength=1 + Content *string `json:"content,omitempty"` + // username defines the username of the message sender. + // +optional + // +kubebuilder:validation:MinLength=1 + Username *string `json:"username,omitempty"` + // avatarURL defines the avatar url of the message sender. + // +optional + AvatarURL *URL `json:"avatarURL,omitempty"` + // httpConfig defines HTTP client configuration. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// SlackConfig configures notifications via Slack. +// See https://prometheus.io/docs/alerting/latest/configuration/#slack_config +type SlackConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiURL defines the secret's key that contains the Slack webhook URL. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + APIURL *SecretKeySelector `json:"apiURL,omitempty"` + // channel defines the channel or user to send notifications to. + // +kubebuilder:validation:MinLength=1 + // +optional + Channel *string `json:"channel,omitempty"` + // username defines the slack bot user name. + // +kubebuilder:validation:MinLength=1 + // +optional + Username *string `json:"username,omitempty"` + // color defines the color of the left border of the Slack message attachment. + // Can be a hex color code (e.g., "#ff0000") or a predefined color name. + // +kubebuilder:validation:MinLength=1 + // +optional + Color *string `json:"color,omitempty"` + // title defines the title text displayed in the Slack message attachment. + // +kubebuilder:validation:MinLength=1 + // +optional + Title *string `json:"title,omitempty"` + // titleLink defines the URL that the title will link to when clicked. + // +optional + TitleLink string `json:"titleLink,omitempty"` + // pretext defines optional text that appears above the message attachment block. + // +kubebuilder:validation:MinLength=1 + // +optional + Pretext *string `json:"pretext,omitempty"` + // text defines the main text content of the Slack message attachment. + // +kubebuilder:validation:MinLength=1 + // +optional + Text *string `json:"text,omitempty"` + // fields defines a list of Slack fields that are sent with each notification. + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +optional + Fields []SlackField `json:"fields,omitempty"` + // shortFields determines whether fields are displayed in a compact format. + // When true, fields are shown side by side when possible. + // +optional + ShortFields *bool `json:"shortFields,omitempty"` // nolint:kubeapilinter + // footer defines small text displayed at the bottom of the message attachment. 
+ // +kubebuilder:validation:MinLength=1 + // +optional + Footer *string `json:"footer,omitempty"` + // fallback defines a plain-text summary of the attachment for clients that don't support attachments. + // +kubebuilder:validation:MinLength=1 + // +optional + Fallback *string `json:"fallback,omitempty"` + // callbackId defines an identifier for the message used in interactive components. + // +kubebuilder:validation:MinLength=1 + // +optional + CallbackID *string `json:"callbackId,omitempty"` + // iconEmoji defines the emoji to use as the bot's avatar (e.g., ":ghost:"). + // +kubebuilder:validation:MinLength=1 + // +optional + IconEmoji *string `json:"iconEmoji,omitempty"` + // iconURL defines the URL to an image to use as the bot's avatar. + // +optional + IconURL string `json:"iconURL,omitempty"` + // imageURL defines the URL to an image file that will be displayed inside the message attachment. + // +optional + ImageURL string `json:"imageURL,omitempty"` + // thumbURL defines the URL to an image file that will be displayed as a thumbnail + // on the right side of the message attachment. + // +optional + ThumbURL string `json:"thumbURL,omitempty"` + // linkNames enables automatic linking of channel names and usernames in the message. + // When true, @channel and @username will be converted to clickable links. + // +optional + LinkNames *bool `json:"linkNames,omitempty"` // nolint:kubeapilinter + // mrkdwnIn defines which fields should be parsed as Slack markdown. + // Valid values include "pretext", "text", and "fields". + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:items:MinLength=1 + // +optional + MrkdwnIn []string `json:"mrkdwnIn,omitempty"` + // actions defines a list of Slack actions that are sent with each notification. + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +optional + Actions []SlackAction `json:"actions,omitempty"` + // httpConfig defines the HTTP client configuration. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` + // timeout defines the maximum time to wait for a webhook request to complete, + // before failing the request and allowing it to be retried. + // It requires Alertmanager >= v0.30.0. + // +optional + Timeout *monitoringv1.Duration `json:"timeout,omitempty"` +} + +// Validate ensures SlackConfig is valid. +func (sc *SlackConfig) Validate() error { + for _, action := range sc.Actions { + if err := action.Validate(); err != nil { + return err + } + } + + for _, field := range sc.Fields { + if err := field.Validate(); err != nil { + return err + } + } + + return nil +} + +// SlackAction configures a single Slack action that is sent with each +// notification. +// See https://api.slack.com/docs/message-attachments#action_fields and +// https://api.slack.com/docs/message-buttons for more information. +type SlackAction struct { + // type defines the type of interactive component. + // Common values include "button" for clickable buttons and "select" for dropdown menus. + // +kubebuilder:validation:MinLength=1 + // +required + Type string `json:"type"` + // text defines the user-visible label displayed on the action element. + // For buttons, this is the button text. For select menus, this is the placeholder text. + // +kubebuilder:validation:MinLength=1 + // +required + Text string `json:"text"` + // url defines the URL to open when the action is triggered. + // Only applicable for button-type actions. When set, clicking the button opens this URL. 
+ // +optional + URL string `json:"url,omitempty"` + // style defines the visual appearance of the action element. + // Valid values include "default", "primary" (green), and "danger" (red). + // +kubebuilder:validation:MinLength=1 + // +optional + Style *string `json:"style,omitempty"` + // name defines a unique identifier for the action within the message. + // This value is sent back to your application when the action is triggered. + // +kubebuilder:validation:MinLength=1 + // +optional + Name *string `json:"name,omitempty"` + // value defines the payload sent when the action is triggered. + // This data is included in the callback sent to your application. + // +kubebuilder:validation:MinLength=1 + // +optional + Value *string `json:"value,omitempty"` + // confirm defines an optional confirmation dialog that appears before the action is executed. + // When set, users must confirm their intent before the action proceeds. + // +optional + ConfirmField *SlackConfirmationField `json:"confirm,omitempty"` +} + +// Validate ensures SlackAction is valid. +func (sa *SlackAction) Validate() error { + if sa.Type == "" { + return errors.New("missing type in Slack action configuration") + } + + if sa.Text == "" { + return errors.New("missing text in Slack action configuration") + } + + if sa.URL == "" && ptr.Deref(sa.Name, "") == "" { + return errors.New("missing name or url in Slack action configuration") + } + + if sa.ConfirmField != nil { + if err := sa.ConfirmField.Validate(); err != nil { + return err + } + } + + return nil +} + +// SlackConfirmationField protect users from destructive actions or +// particularly distinguished decisions by asking them to confirm their button +// click one more time. +// See https://api.slack.com/docs/interactive-message-field-guide#confirmation_fields +// for more information. +type SlackConfirmationField struct { + // text defines the main message displayed in the confirmation dialog. + // This should be a clear question or statement asking the user to confirm their action. + // +kubebuilder:validation:MinLength=1 + // +required + Text string `json:"text"` + // title defines the title text displayed at the top of the confirmation dialog. + // When not specified, a default title will be used. + // +kubebuilder:validation:MinLength=1 + // +optional + Title *string `json:"title,omitempty"` + // okText defines the label for the confirmation button in the dialog. + // When not specified, defaults to "Okay". This button proceeds with the action. + // +kubebuilder:validation:MinLength=1 + // +optional + OkText *string `json:"okText,omitempty"` + // dismissText defines the label for the cancel button in the dialog. + // When not specified, defaults to "Cancel". This button cancels the action. + // +kubebuilder:validation:MinLength=1 + // +optional + DismissText *string `json:"dismissText,omitempty"` +} + +// Validate ensures SlackConfirmationField is valid. +func (scf *SlackConfirmationField) Validate() error { + if scf.Text == "" { + return errors.New("missing text in Slack confirmation configuration") + } + return nil +} + +// SlackField configures a single Slack field that is sent with each notification. +// Each field must contain a title, value, and optionally, a boolean value to indicate if the field +// is short enough to be displayed next to other fields designated as short. +// See https://api.slack.com/docs/message-attachments#fields for more information. +type SlackField struct { + // title defines the label or header text displayed for this field. 
+ // This appears as bold text above the field value in the Slack message. + // +kubebuilder:validation:MinLength=1 + // +required + Title string `json:"title"` + // value defines the content or data displayed for this field. + // This appears below the title and can contain plain text or Slack markdown. + // +kubebuilder:validation:MinLength=1 + // +required + Value string `json:"value"` + // short determines whether this field can be displayed alongside other short fields. + // When true, Slack may display this field side by side with other short fields. + // When false or not specified, the field takes the full width of the message. + // +optional + Short *bool `json:"short,omitempty"` // nolint:kubeapilinter +} + +// Validate ensures SlackField is valid +func (sf *SlackField) Validate() error { + if sf.Title == "" { + return errors.New("missing title in Slack field configuration") + } + + if sf.Value == "" { + return errors.New("missing value in Slack field configuration") + } + + return nil +} + +// WebhookConfig configures notifications via a generic receiver supporting the webhook payload. +// See https://prometheus.io/docs/alerting/latest/configuration/#webhook_config +type WebhookConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // url defines the URL to send HTTP POST requests to. + // urlSecret takes precedence over url. One of urlSecret and url should be defined. + // +optional + URL *string `json:"url,omitempty"` + // urlSecret defines the secret's key that contains the webhook URL to send HTTP requests to. + // urlSecret takes precedence over url. One of urlSecret and url should be defined. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + URLSecret *SecretKeySelector `json:"urlSecret,omitempty"` + // httpConfig defines the HTTP client configuration for webhook requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` + // maxAlerts defines the maximum number of alerts to be sent per webhook message. + // When 0, all alerts are included in the webhook payload. + // +optional + // +kubebuilder:validation:Minimum=0 + MaxAlerts int32 `json:"maxAlerts,omitempty"` + // timeout defines the maximum time to wait for a webhook request to complete, + // before failing the request and allowing it to be retried. + // It requires Alertmanager >= v0.28.0. + // +optional + Timeout *monitoringv1.Duration `json:"timeout,omitempty"` +} + +// OpsGenieConfig configures notifications via OpsGenie. +// See https://prometheus.io/docs/alerting/latest/configuration/#opsgenie_config +type OpsGenieConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiKey defines the secret's key that contains the OpsGenie API key. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + APIKey *SecretKeySelector `json:"apiKey,omitempty"` + // apiURL defines the URL to send OpsGenie API requests to. + // When not specified, defaults to the standard OpsGenie API endpoint. + // +optional + APIURL *URL `json:"apiURL,omitempty"` + // message defines the alert text limited to 130 characters. + // This appears as the main alert title in OpsGenie. 
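+ // The value may use Alertmanager templating, e.g. (illustrative) {{ .CommonLabels.alertname }}.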
+ // +optional + Message string `json:"message,omitempty"` + // description defines the detailed description of the incident. + // This provides additional context beyond the message field. + // +optional + Description string `json:"description,omitempty"` + // source defines the backlink to the sender of the notification. + // This helps identify where the alert originated from. + // +optional + Source string `json:"source,omitempty"` + // tags defines a comma separated list of tags attached to the notifications. + // These help categorize and filter alerts within OpsGenie. + // +optional + Tags string `json:"tags,omitempty"` + // note defines an additional alert note. + // This provides supplementary information about the alert. + // +optional + Note string `json:"note,omitempty"` + // priority defines the priority level of alert. + // Possible values are P1, P2, P3, P4, and P5, where P1 is highest priority. + // +optional + Priority string `json:"priority,omitempty"` + // details defines a set of arbitrary key/value pairs that provide further detail about the incident. + // These appear as additional fields in the OpsGenie alert. + // +optional + Details []KeyValue `json:"details,omitempty"` + // responders defines the list of responders responsible for notifications. + // These determine who gets notified when the alert is created. + // +optional + Responders []OpsGenieConfigResponder `json:"responders,omitempty"` + // httpConfig defines the HTTP client configuration for OpsGenie API requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` + // entity defines an optional field that can be used to specify which domain alert is related to. + // This helps group related alerts together in OpsGenie. + // +optional + Entity string `json:"entity,omitempty"` + // actions defines a comma separated list of actions that will be available for the alert. + // These appear as action buttons in the OpsGenie interface. + // +optional + Actions string `json:"actions,omitempty"` +} + +// Validate ensures OpsGenieConfig is valid +func (o *OpsGenieConfig) Validate() error { + for _, responder := range o.Responders { + if err := responder.Validate(); err != nil { + return err + } + } + return nil +} + +// OpsGenieConfigResponder defines a responder to an incident. +// One of `id`, `name` or `username` has to be defined. +// +kubebuilder:validation:OneOf=ID,Name,Username +type OpsGenieConfigResponder struct { + // id defines the unique identifier of the responder. + // This corresponds to the responder's ID within OpsGenie. + // +optional + ID string `json:"id,omitempty"` + // name defines the display name of the responder. + // This is used when the responder is identified by name rather than ID. + // +optional + Name string `json:"name,omitempty"` + // username defines the username of the responder. + // This is typically used for user-type responders when identifying by username. + // +optional + Username string `json:"username,omitempty"` + // type defines the type of responder. + // Valid values include "user", "team", "schedule", and "escalation". + // This determines how OpsGenie interprets the other identifier fields. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Enum=team;teams;user;escalation;schedule + // +required + Type string `json:"type"` +} + +const opsgenieValidTypesRe = `^(team|teams|user|escalation|schedule)$` + +var opsgenieTypeMatcher = regexp.MustCompile(opsgenieValidTypesRe) + +// Validate ensures OpsGenieConfigResponder is valid. 
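+// At least one of ID, Name or Username must be set; Type must either match
+// opsgenieValidTypesRe or contain a Go template (detected by a "{{" substring),
+// in which case only the template syntax is validated.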
+func (r *OpsGenieConfigResponder) Validate() error { + if r.ID == "" && r.Name == "" && r.Username == "" { + return errors.New("responder must have at least an ID, a Name or an Username defined") + } + + if strings.Contains(r.Type, "{{") { + _, err := template.New("").Parse(r.Type) + if err != nil { + return fmt.Errorf("responder %v type is not a valid template: %w", r, err) + } + return nil + } + + if opsgenieTypeMatcher.MatchString(strings.ToLower(r.Type)) { + return nil + } + return fmt.Errorf("opsGenieConfig responder %v type does not match valid options %s", r, opsgenieValidTypesRe) +} + +// HTTPConfig defines a client HTTP configuration. +// See https://prometheus.io/docs/alerting/latest/configuration/#http_config +type HTTPConfig struct { + // authorization defines the authorization header configuration for the client. + // This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + // +optional + Authorization *monitoringv1.SafeAuthorization `json:"authorization,omitempty"` + // basicAuth defines the basic authentication credentials for the client. + // This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. + // +optional + BasicAuth *monitoringv1.BasicAuth `json:"basicAuth,omitempty"` + // oauth2 defines the OAuth2 client credentials used to fetch a token for the targets. + // This enables OAuth2 authentication flow for HTTP requests. + // +optional + OAuth2 *monitoringv1.OAuth2 `json:"oauth2,omitempty"` + // bearerTokenSecret defines the secret's key that contains the bearer token to be used by the client + // for authentication. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + BearerTokenSecret *SecretKeySelector `json:"bearerTokenSecret,omitempty"` + // tlsConfig defines the TLS configuration for the client. + // This includes settings for certificates, CA validation, and TLS protocol options. + // +optional + TLSConfig *monitoringv1.SafeTLSConfig `json:"tlsConfig,omitempty"` + + // proxyURL defines an optional proxy URL for HTTP requests. + // If defined, this field takes precedence over `proxyUrl`. + // + // +optional + ProxyURLOriginal *string `json:"proxyURL,omitempty"` + + monitoringv1.ProxyConfig `json:",inline"` + + // followRedirects defines whether HTTP requests follow HTTP 3xx redirects. + // When true, the client will automatically follow redirect responses. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter + + // enableHttp2 can be used to disable HTTP2. + // + // +optional + EnableHTTP2 *bool `json:"enableHttp2,omitempty"` // nolint:kubeapilinter +} + +// WebexConfig configures notification via Cisco Webex +// See https://prometheus.io/docs/alerting/latest/configuration/#webex_config +type WebexConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + + // apiURL defines the Webex Teams API URL i.e. https://webexapis.com/v1/messages + // +optional + APIURL *URL `json:"apiURL,omitempty"` + + // httpConfig defines the HTTP client's configuration. + // You must use this configuration to supply the bot token as part of the HTTP `Authorization` header. 
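+ // One way to do this (illustrative) is to reference a Secret key holding the
+ // bot token through `authorization.credentials`.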
+ // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` + + // message defines the message template + // +optional + Message *string `json:"message,omitempty"` + + // roomID defines the ID of the Webex Teams room where to send the messages. + // +kubebuilder:validation:MinLength=1 + // +required + RoomID string `json:"roomID"` +} + +// WeChatConfig configures notifications via WeChat. +// See https://prometheus.io/docs/alerting/latest/configuration/#wechat_config +type WeChatConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiSecret defines the secret's key that contains the WeChat API key. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + APISecret *SecretKeySelector `json:"apiSecret,omitempty"` + // apiURL defines the WeChat API URL. + // When not specified, defaults to the standard WeChat Work API endpoint. + // +optional + APIURL *URL `json:"apiURL,omitempty"` + // corpID defines the corp id for authentication. + // This is the unique identifier for your WeChat Work organization. + // +optional + CorpID string `json:"corpID,omitempty"` + // agentID defines the application agent ID within WeChat Work. + // This identifies which WeChat Work application will send the notifications. + // +optional + AgentID string `json:"agentID,omitempty"` + // toUser defines the target user(s) to receive the notification. + // Can be a single user ID or multiple user IDs separated by '|'. + // +optional + ToUser string `json:"toUser,omitempty"` + // toParty defines the target department(s) to receive the notification. + // Can be a single department ID or multiple department IDs separated by '|'. + // +optional + ToParty string `json:"toParty,omitempty"` + // toTag defines the target tag(s) to receive the notification. + // Can be a single tag ID or multiple tag IDs separated by '|'. + // +optional + ToTag string `json:"toTag,omitempty"` + // message defines the API request data as defined by the WeChat API. + // This contains the actual notification content to be sent. + // +optional + Message string `json:"message,omitempty"` + // messageType defines the type of message to send. + // Valid values include "text", "markdown", and other WeChat Work supported message types. + // +optional + MessageType string `json:"messageType,omitempty"` + // httpConfig defines the HTTP client configuration for WeChat API requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// EmailConfig configures notifications via Email. +type EmailConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // to defines the email address to send notifications to. + // This is the recipient address for alert notifications. + // +optional + To string `json:"to,omitempty"` + // from defines the sender address for email notifications. + // This appears as the "From" field in the email header. + // +optional + From string `json:"from,omitempty"` + // hello defines the hostname to identify to the SMTP server. + // This is used in the SMTP HELO/EHLO command during the connection handshake. + // +optional + Hello string `json:"hello,omitempty"` + // smarthost defines the SMTP host and port through which emails are sent. 
+ // Format should be "hostname:port", e.g. "smtp.example.com:587". + // +optional + Smarthost string `json:"smarthost,omitempty"` + // authUsername defines the username to use for SMTP authentication. + // This is used for SMTP AUTH when the server requires authentication. + // +optional + AuthUsername string `json:"authUsername,omitempty"` + // authPassword defines the secret's key that contains the password to use for authentication. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + AuthPassword *SecretKeySelector `json:"authPassword,omitempty"` + // authSecret defines the secret's key that contains the CRAM-MD5 secret. + // This is used for CRAM-MD5 authentication mechanism. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + AuthSecret *SecretKeySelector `json:"authSecret,omitempty"` + // authIdentity defines the identity to use for SMTP authentication. + // This is typically used with PLAIN authentication mechanism. + // +optional + AuthIdentity string `json:"authIdentity,omitempty"` + // headers defines additional email header key/value pairs. + // These override any headers previously set by the notification implementation. + // +optional + Headers []KeyValue `json:"headers,omitempty"` + // html defines the HTML body of the email notification. + // This allows for rich formatting in the email content. + // +optional + HTML *string `json:"html,omitempty"` + // text defines the plain text body of the email notification. + // This provides a fallback for email clients that don't support HTML. + // +optional + Text *string `json:"text,omitempty"` + // requireTLS defines the SMTP TLS requirement. + // Note that Go does not support unencrypted connections to remote SMTP endpoints. + // +optional + RequireTLS *bool `json:"requireTLS,omitempty"` // nolint:kubeapilinter + // tlsConfig defines the TLS configuration for SMTP connections. + // This includes settings for certificates, CA validation, and TLS protocol options. + // +optional + TLSConfig *monitoringv1.SafeTLSConfig `json:"tlsConfig,omitempty"` +} + +// VictorOpsConfig configures notifications via VictorOps. +// See https://prometheus.io/docs/alerting/latest/configuration/#victorops_config +type VictorOpsConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiKey defines the secret's key that contains the API key to use when talking to the VictorOps API. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // +optional + APIKey *SecretKeySelector `json:"apiKey,omitempty"` + // apiUrl defines the VictorOps API URL. + // When not specified, defaults to the standard VictorOps API endpoint. + // +optional + APIURL *URL `json:"apiUrl,omitempty"` + // routingKey defines a key used to map the alert to a team. + // This determines which VictorOps team will receive the alert notification. + // +kubebuilder:validation:MinLength=1 + // +required + RoutingKey string `json:"routingKey"` + // messageType describes the behavior of the alert. + // Valid values are "CRITICAL", "WARNING", and "INFO". 
+ // +kubebuilder:validation:MinLength=1 + // +optional + MessageType *string `json:"messageType,omitempty"` + // entityDisplayName contains a summary of the alerted problem. + // This appears as the main title or identifier for the incident. + // +kubebuilder:validation:MinLength=1 + // +optional + EntityDisplayName *string `json:"entityDisplayName,omitempty"` + // stateMessage contains a long explanation of the alerted problem. + // This provides detailed context about the incident. + // +kubebuilder:validation:MinLength=1 + // +optional + StateMessage *string `json:"stateMessage,omitempty"` + // monitoringTool defines the monitoring tool the state message is from. + // This helps identify the source system that generated the alert. + // +kubebuilder:validation:MinLength=1 + // +optional + MonitoringTool *string `json:"monitoringTool,omitempty"` + // customFields defines additional custom fields for notification. + // These provide extra metadata that will be included with the VictorOps incident. + // +listType=atomic + // +optional + CustomFields []KeyValue `json:"customFields,omitempty"` + // httpConfig defines the HTTP client's configuration for VictorOps API requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// PushoverConfig configures notifications via Pushover. +// See https://prometheus.io/docs/alerting/latest/configuration/#pushover_config +type PushoverConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // userKey defines the secret's key that contains the recipient user's user key. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // Either `userKey` or `userKeyFile` is required. + // +optional + UserKey *SecretKeySelector `json:"userKey,omitempty"` + // userKeyFile defines the user key file that contains the recipient user's user key. + // Either `userKey` or `userKeyFile` is required. + // It requires Alertmanager >= v0.26.0. + // +optional + UserKeyFile *string `json:"userKeyFile,omitempty"` + // token defines the secret's key that contains the registered application's API token. + // See https://pushover.net/apps for application registration. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // Either `token` or `tokenFile` is required. + // +optional + Token *SecretKeySelector `json:"token,omitempty"` + // tokenFile defines the token file that contains the registered application's API token. + // See https://pushover.net/apps for application registration. + // Either `token` or `tokenFile` is required. + // It requires Alertmanager >= v0.26.0. + // +optional + TokenFile *string `json:"tokenFile,omitempty"` + // title defines the notification title displayed in the Pushover message. + // This appears as the bold header text in the notification. + // +kubebuilder:validation:MinLength=1 + // +optional + Title *string `json:"title,omitempty"` + // message defines the notification message content. + // This is the main body text of the Pushover notification. + // +kubebuilder:validation:MinLength=1 + // +optional + Message *string `json:"message,omitempty"` + // url defines a supplementary URL shown alongside the message. + // This creates a clickable link within the Pushover notification. 
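+ // For example (illustrative), a link to a runbook or dashboard for the alert.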
+ // +optional + URL string `json:"url,omitempty"` + // urlTitle defines a title for the supplementary URL. + // If not specified, the raw URL is shown instead. + // +kubebuilder:validation:MinLength=1 + // +optional + URLTitle *string `json:"urlTitle,omitempty"` + // ttl defines the time to live for the alert notification. + // This determines how long the notification remains active before expiring. + // +optional + TTL *monitoringv1.Duration `json:"ttl,omitempty"` + // device defines the name of a specific device to send the notification to. + // If not specified, the notification is sent to all user's devices. + // +kubebuilder:validation:MinLength=1 + // +optional + Device *string `json:"device,omitempty"` + // sound defines the name of one of the sounds supported by device clients. + // This overrides the user's default sound choice for this notification. + // +kubebuilder:validation:MinLength=1 + // +optional + Sound *string `json:"sound,omitempty"` + // priority defines the notification priority level. + // See https://pushover.net/api#priority for valid values and behavior. + // +kubebuilder:validation:MinLength=1 + // +optional + Priority *string `json:"priority,omitempty"` + // retry defines how often the Pushover servers will send the same notification to the user. + // Must be at least 30 seconds. Only applies to priority 2 notifications. + // +kubebuilder:validation:Pattern=`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` + // +optional + Retry *string `json:"retry,omitempty"` + // expire defines how long your notification will continue to be retried for, + // unless the user acknowledges the notification. Only applies to priority 2 notifications. + // +kubebuilder:validation:Pattern=`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` + // +optional + Expire *string `json:"expire,omitempty"` + // html defines whether notification message is HTML or plain text. + // When true, the message can include HTML formatting tags. + // html and monospace formatting are mutually exclusive. + // +optional + HTML *bool `json:"html,omitempty"` //nolint:kubeapilinter + // monospace optional HTML/monospace formatting for the message, see https://pushover.net/api#html + // html and monospace formatting are mutually exclusive. + // +optional + Monospace *bool `json:"monospace,omitempty"` //nolint:kubeapilinter + // httpConfig defines the HTTP client configuration for Pushover API requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// SNSConfig configures notifications via AWS SNS. +// See https://prometheus.io/docs/alerting/latest/configuration/#sns_configs +type SNSConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiURL defines the SNS API URL, e.g. https://sns.us-east-2.amazonaws.com. + // If not specified, the SNS API URL from the SNS SDK will be used. + // +optional + ApiURL string `json:"apiURL,omitempty"` + // sigv4 configures AWS's Signature Verification 4 signing process to sign requests. + // This includes AWS credentials and region configuration for authentication. + // +optional + Sigv4 *monitoringv1.Sigv4 `json:"sigv4,omitempty"` + // topicARN defines the SNS topic ARN, e.g. arn:aws:sns:us-east-2:698519295917:My-Topic. + // If you don't specify this value, you must specify a value for the PhoneNumber or TargetARN. 
+ // +optional + TopicARN string `json:"topicARN,omitempty"` + // subject defines the subject line when the message is delivered to email endpoints. + // This field is only used when sending to email subscribers of an SNS topic. + // +optional + Subject string `json:"subject,omitempty"` + // phoneNumber defines the phone number if message is delivered via SMS in E.164 format. + // If you don't specify this value, you must specify a value for the TopicARN or TargetARN. + // +optional + PhoneNumber string `json:"phoneNumber,omitempty"` + // targetARN defines the mobile platform endpoint ARN if message is delivered via mobile notifications. + // If you don't specify this value, you must specify a value for the TopicARN or PhoneNumber. + // +optional + TargetARN string `json:"targetARN,omitempty"` + // message defines the message content of the SNS notification. + // This is the actual notification text that will be sent to subscribers. + // +optional + Message string `json:"message,omitempty"` + // attributes defines SNS message attributes as key-value pairs. + // These provide additional metadata that can be used for message filtering and routing. + // +optional + //nolint:kubeapilinter + Attributes map[string]string `json:"attributes,omitempty"` + // httpConfig defines the HTTP client configuration for SNS API requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// TelegramConfig configures notifications via Telegram. +// See https://prometheus.io/docs/alerting/latest/configuration/#telegram_config +type TelegramConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiURL defines the Telegram API URL, e.g. https://api.telegram.org. + // If not specified, the default Telegram API URL will be used. + // +optional + APIURL *URL `json:"apiURL,omitempty"` + // botToken defines the Telegram bot token. It is mutually exclusive with `botTokenFile`. + // The secret needs to be in the same namespace as the AlertmanagerConfig + // object and accessible by the Prometheus Operator. + // Either `botToken` or `botTokenFile` is required. + // +optional + BotToken *SecretKeySelector `json:"botToken,omitempty"` + // botTokenFile defines the file to read the Telegram bot token from. + // It is mutually exclusive with `botToken`. + // Either `botToken` or `botTokenFile` is required. + // It requires Alertmanager >= v0.26.0. + // +optional + BotTokenFile *string `json:"botTokenFile,omitempty"` + // chatID defines the Telegram chat ID where messages will be sent. + // This can be a user ID, group ID, or channel ID (with @ prefix for public channels). + // +required + ChatID int64 `json:"chatID,omitempty"` + // messageThreadID defines the Telegram Group Topic ID for threaded messages. + // This allows sending messages to specific topics within Telegram groups. + // It requires Alertmanager >= 0.26.0. + // +optional + MessageThreadID *int64 `json:"messageThreadID,omitempty"` + // message defines the message template for the Telegram notification. + // This is the content that will be sent to the specified chat. + // +optional + Message string `json:"message,omitempty"` + // disableNotifications controls whether Telegram notifications are sent silently. + // When true, users will receive the message without notification sounds. 
+ // +optional + DisableNotifications *bool `json:"disableNotifications,omitempty"` // nolint:kubeapilinter + // parseMode defines the parse mode for telegram message formatting. + // Valid values are "MarkdownV2", "Markdown", and "HTML". + // This determines how text formatting is interpreted in the message. + //+kubebuilder:validation:Enum=MarkdownV2;Markdown;HTML + // +optional + ParseMode string `json:"parseMode,omitempty"` + // httpConfig defines the HTTP client configuration for Telegram API requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// MSTeamsConfig configures notifications via Microsoft Teams. +// It requires Alertmanager >= 0.26.0. +type MSTeamsConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // webhookUrl defines the MSTeams webhook URL for sending notifications. + // This is the incoming webhook URL configured in your Teams channel. + // +required + WebhookURL v1.SecretKeySelector `json:"webhookUrl"` + // title defines the message title template for Teams notifications. + // This appears as the main heading of the Teams message card. + // +optional + Title *string `json:"title,omitempty"` + // summary defines the message summary template for Teams notifications. + // This provides a brief overview that appears in Teams notification previews. + // It requires Alertmanager >= 0.27.0. + // +optional + Summary *string `json:"summary,omitempty"` + // text defines the message body template for Teams notifications. + // This contains the detailed content of the Teams message. + // +optional + Text *string `json:"text,omitempty"` + // httpConfig defines the HTTP client configuration for Teams webhook requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// MSTeamsV2Config configures notifications via Microsoft Teams using the new message format with adaptive cards as required by flows. +// See https://prometheus.io/docs/alerting/latest/configuration/#msteamsv2_config +// It requires Alertmanager >= 0.28.0. +type MSTeamsV2Config struct { + // sendResolved defines whether or not to notify about resolved alerts. + // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // webhookURL defines the MSTeams incoming webhook URL for adaptive card notifications. + // This webhook must support the newer adaptive cards format required by Teams flows. + // +optional + WebhookURL *v1.SecretKeySelector `json:"webhookURL,omitempty"` + // title defines the message title template for adaptive card notifications. + // This appears as the main heading in the Teams adaptive card. + // +kubebuilder:validation:MinLength=1 + // +optional + Title *string `json:"title,omitempty"` + // text defines the message body template for adaptive card notifications. + // This contains the detailed content displayed in the Teams adaptive card format. + // +kubebuilder:validation:MinLength=1 + // +optional + Text *string `json:"text,omitempty"` + // httpConfig defines the HTTP client configuration for Teams webhook requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// RocketChatConfig configures notifications via RocketChat. +// It requires Alertmanager >= 0.28.0. +type RocketChatConfig struct { + // sendResolved defines whether or not to notify about resolved alerts. 
+ // +optional + SendResolved *bool `json:"sendResolved,omitempty"` // nolint:kubeapilinter + // apiURL defines the API URL for RocketChat. + // Defaults to https://open.rocket.chat/ if not specified. + // +optional + APIURL *URL `json:"apiURL,omitempty"` + // channel defines the channel to send alerts to. + // This can be a channel name (e.g., "#alerts") or a direct message recipient. + // +kubebuilder:validation:MinLength=1 + // +optional + Channel *string `json:"channel,omitempty"` + // token defines the sender token for RocketChat authentication. + // This is the personal access token or bot token used to authenticate API requests. + // +required + Token v1.SecretKeySelector `json:"token,omitempty"` + // tokenID defines the sender token ID for RocketChat authentication. + // This is the user ID associated with the token used for API requests. + // +required + TokenID v1.SecretKeySelector `json:"tokenID,omitempty"` + // color defines the message color displayed in RocketChat. + // This appears as a colored bar alongside the message. + // +kubebuilder:validation:MinLength=1 + // +optional + Color *string `json:"color,omitempty"` + // emoji defines the emoji to be displayed as an avatar. + // If provided, this emoji will be used instead of the default avatar or iconURL. + // +kubebuilder:validation:MinLength=1 + // +optional + Emoji *string `json:"emoji,omitempty"` + // iconURL defines the icon URL for the message avatar. + // This displays a custom image as the message sender's avatar. + // +optional + IconURL *string `json:"iconURL,omitempty"` + // text defines the message text to send. + // This is optional because attachments can be used instead of or alongside text. + // +kubebuilder:validation:MinLength=1 + // +optional + Text *string `json:"text,omitempty"` + // title defines the message title displayed prominently in the message. + // This appears as bold text at the top of the message attachment. + // +kubebuilder:validation:MinLength=1 + // +optional + Title *string `json:"title,omitempty"` + // titleLink defines the URL that the title will link to when clicked. + // This makes the message title clickable in the RocketChat interface. + // +kubebuilder:validation:MinLength=1 + // +optional + TitleLink *string `json:"titleLink,omitempty"` + // fields defines additional fields for the message attachment. + // These appear as structured key-value pairs within the message. + // +kubebuilder:validation:MinItems=1 + // +optional + Fields []RocketChatFieldConfig `json:"fields,omitempty"` + // shortFields defines whether to use short fields in the message layout. + // When true, fields may be displayed side by side to save space. + // +optional + ShortFields *bool `json:"shortFields,omitempty"` // nolint:kubeapilinter + // imageURL defines the image URL to display within the message. + // This embeds an image directly in the message attachment. + // +optional + ImageURL *string `json:"imageURL,omitempty"` + // thumbURL defines the thumbnail URL for the message. + // This displays a small thumbnail image alongside the message content. + // +optional + ThumbURL *string `json:"thumbURL,omitempty"` + // linkNames defines whether to enable automatic linking of usernames and channels. + // When true, @username and #channel references become clickable links. + // +optional + LinkNames *bool `json:"linkNames,omitempty"` // nolint:kubeapilinter + // actions defines interactive actions to include in the message. + // These appear as buttons that users can click to trigger responses. 
+ // +kubebuilder:validation:MinItems=1 + // +optional + Actions []RocketChatActionConfig `json:"actions,omitempty"` + // httpConfig defines the HTTP client configuration for RocketChat API requests. + // +optional + HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` +} + +// RocketChatFieldConfig defines a field for RocketChat messages. +type RocketChatFieldConfig struct { + // title defines the title of this field. + // This appears as bold text labeling the field content. + // +kubebuilder:validation:MinLength=1 + // +optional + Title *string `json:"title,omitempty"` + // value defines the value of this field, displayed underneath the title. + // This contains the actual data or content for the field. + // +kubebuilder:validation:MinLength=1 + // +optional + Value *string `json:"value,omitempty"` + // short defines whether this field should be a short field. + // When true, the field may be displayed inline with other short fields to save space. + // +optional + Short *bool `json:"short,omitempty"` // nolint:kubeapilinter +} + +// RocketChatActionConfig defines actions for RocketChat messages. +type RocketChatActionConfig struct { + // text defines the button text displayed to users. + // This is the label that appears on the interactive button. + // +kubebuilder:validation:MinLength=1 + // +optional + Text *string `json:"text,omitempty"` + // url defines the URL the button links to when clicked. + // This creates a clickable button that opens the specified URL. + // +optional + URL *string `json:"url,omitempty"` + // msg defines the message to send when the button is clicked. + // This allows the button to post a predefined message to the channel. + // +kubebuilder:validation:MinLength=1 + // +optional + Msg *string `json:"msg,omitempty"` +} + +// InhibitRule defines an inhibition rule that allows to mute alerts when other +// alerts are already firing. +// See https://prometheus.io/docs/alerting/latest/configuration/#inhibit_rule +type InhibitRule struct { + // targetMatch defines matchers that have to be fulfilled in the alerts to be muted. + // The operator enforces that the alert matches the resource's namespace. + // When these conditions are met, matching alerts will be inhibited (silenced). + // +optional + TargetMatch []Matcher `json:"targetMatch,omitempty"` + // sourceMatch defines matchers for which one or more alerts have to exist for the inhibition + // to take effect. The operator enforces that the alert matches the resource's namespace. + // These are the "trigger" alerts that cause other alerts to be inhibited. + // +optional + SourceMatch []Matcher `json:"sourceMatch,omitempty"` + // equal defines labels that must have an equal value in the source and target alert + // for the inhibition to take effect. This ensures related alerts are properly grouped. + // +optional + Equal []string `json:"equal,omitempty"` +} + +// KeyValue defines a (key, value) tuple. +type KeyValue struct { + // key defines the key of the tuple. + // This is the identifier or name part of the key-value pair. + // +kubebuilder:validation:MinLength=1 + // +required + Key string `json:"key"` + // value defines the value of the tuple. + // This is the data or content associated with the key. + // +required + Value string `json:"value"` +} + +// Matcher defines how to match on alert's labels. +type Matcher struct { + // name defines the label to match. + // This specifies which alert label should be evaluated. 
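+ // For example (illustrative): "severity" or "namespace".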
+ // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + // value defines the label value to match. + // This is the expected value for the specified label. + // +optional + Value string `json:"value"` + // matchType defines the match operation available with AlertManager >= v0.22.0. + // Takes precedence over Regex (deprecated) if non-empty. + // Valid values: "=" (equality), "!=" (inequality), "=~" (regex match), "!~" (regex non-match). + // +kubebuilder:validation:Enum=!=;=;=~;!~ + // +optional + MatchType MatchType `json:"matchType,omitempty"` +} + +// String returns Matcher as a string +// Use only for MatchType Matcher +func (in Matcher) String() string { + return fmt.Sprintf(`%s%s"%s"`, in.Name, in.MatchType, openMetricsEscape(in.Value)) +} + +// Validate the Matcher returns an error if the matcher is invalid +// Validates only non-deprecated matching fields +func (in Matcher) Validate() error { + // nothing to do + if in.MatchType == "" { + return nil + } + + if !in.MatchType.Valid() { + return fmt.Errorf("invalid 'matchType' '%s' provided'", in.MatchType) + } + + if strings.TrimSpace(in.Name) == "" { + return errors.New("matcher 'name' is required") + } + + return nil +} + +// MatchType is a comparison operator on a Matcher +type MatchType string + +// Valid MatchType returns true if the operator is acceptable +func (mt MatchType) Valid() bool { + _, ok := validMatchTypes[mt] + return ok +} + +// SecretKeySelector selects a key of a Secret. +type SecretKeySelector struct { + // name defines the name of the secret in the object's namespace to select from. + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + // key defines the key of the secret to select from. Must be a valid secret key. + // +kubebuilder:validation:MinLength=1 + // +required + Key string `json:"key"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *AlertmanagerConfig) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *AlertmanagerConfigList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +const ( + MatchEqual MatchType = "=" + MatchNotEqual MatchType = "!=" + MatchRegexp MatchType = "=~" + MatchNotRegexp MatchType = "!~" +) + +var validMatchTypes = map[MatchType]bool{ + MatchEqual: true, + MatchNotEqual: true, + MatchRegexp: true, + MatchNotRegexp: true, +} + +// openMetricsEscape is similar to the usual string escaping, but more +// restricted. It merely replaces a new-line character with '\n', a double-quote +// character with '\"', and a backslash with '\\', which is the escaping used by +// OpenMetrics. +// * Copied from alertmanager codebase pkg/labels * +func openMetricsEscape(s string) string { + r := strings.NewReplacer( + `\`, `\\`, + "\n", `\n`, + `"`, `\"`, + ) + return r.Replace(s) +} + +// TimeInterval specifies the periods in time when notifications will be muted or active. +type TimeInterval struct { + // name of the time interval. + // +required + Name string `json:"name,omitempty"` + // timeIntervals defines a list of TimePeriod. + // +optional + TimeIntervals []TimePeriod `json:"timeIntervals,omitempty"` +} + +// TimePeriod describes periods of time. 
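+// As an illustrative example, weekday business hours could be represented as:
+//
+//	TimePeriod{
+//		Weekdays: []WeekdayRange{"monday:friday"},
+//		Times:    []TimeRange{{StartTime: "09:00", EndTime: "17:00"}},
+//	}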
+type TimePeriod struct { + // times defines a list of TimeRange + // +optional + Times []TimeRange `json:"times,omitempty"` + // weekdays defines a list of WeekdayRange + // +optional + Weekdays []WeekdayRange `json:"weekdays,omitempty"` + // daysOfMonth defines a list of DayOfMonthRange + // +optional + DaysOfMonth []DayOfMonthRange `json:"daysOfMonth,omitempty"` + // months defines a list of MonthRange + // +optional + Months []MonthRange `json:"months,omitempty"` + // years defines a list of YearRange + // +optional + Years []YearRange `json:"years,omitempty"` +} + +// Time defines a time in 24hr format +// +kubebuilder:validation:Pattern=`^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$)` +type Time string + +// TimeRange defines a start and end time in 24hr format +type TimeRange struct { + // startTime defines the start time in 24hr format. + // +optional + StartTime Time `json:"startTime,omitempty"` + // endTime defines the end time in 24hr format. + // +optional + EndTime Time `json:"endTime,omitempty"` +} + +// WeekdayRange is an inclusive range of days of the week beginning on Sunday +// Days can be specified by name (e.g 'Sunday') or as an inclusive range (e.g 'Monday:Friday') +// +kubebuilder:validation:Pattern=`^((?i)sun|mon|tues|wednes|thurs|fri|satur)day(?:((:(sun|mon|tues|wednes|thurs|fri|satur)day)$)|$)` +type WeekdayRange string + +// DayOfMonthRange is an inclusive range of days of the month beginning at 1 +type DayOfMonthRange struct { + // start of the inclusive range + // +kubebuilder:validation:Minimum=-31 + // +kubebuilder:validation:Maximum=31 + // +optional + Start int `json:"start,omitempty"` + // end of the inclusive range + // +kubebuilder:validation:Minimum=-31 + // +kubebuilder:validation:Maximum=31 + // +optional + End int `json:"end,omitempty"` +} + +// MonthRange is an inclusive range of months of the year beginning in January +// Months can be specified by name (e.g 'January') by numerical month (e.g '1') or as an inclusive range (e.g 'January:March', '1:3', '1:March') +// +kubebuilder:validation:Pattern=`^((?i)january|february|march|april|may|june|july|august|september|october|november|december|1[0-2]|[1-9])(?:((:((?i)january|february|march|april|may|june|july|august|september|october|november|december|1[0-2]|[1-9]))$)|$)` +type MonthRange string + +// YearRange is an inclusive range of years +// +kubebuilder:validation:Pattern=`^2\d{3}(?::2\d{3}|$)` +type YearRange string + +// Weekday is day of the week +type Weekday string + +const ( + Sunday Weekday = "sunday" + Monday Weekday = "monday" + Tuesday Weekday = "tuesday" + Wednesday Weekday = "wednesday" + Thursday Weekday = "thursday" + Friday Weekday = "friday" + Saturday Weekday = "saturday" +) + +var daysOfWeek = map[Weekday]int{ + Sunday: 0, + Monday: 1, + Tuesday: 2, + Wednesday: 3, + Thursday: 4, + Friday: 5, + Saturday: 6, +} + +var daysOfWeekInv = map[int]Weekday{ + 0: Sunday, + 1: Monday, + 2: Tuesday, + 3: Wednesday, + 4: Thursday, + 5: Friday, + 6: Saturday, +} + +// Month of the year +type Month string + +const ( + January Month = "january" + February Month = "february" + March Month = "march" + April Month = "april" + May Month = "may" + June Month = "june" + July Month = "july" + August Month = "august" + September Month = "september" + October Month = "october" + November Month = "november" + December Month = "december" +) + +var months = map[Month]int{ + January: 1, + February: 2, + March: 3, + April: 4, + May: 5, + June: 6, + July: 7, + August: 8, + September: 9, + October: 10, + 
November: 11, + December: 12, +} + +var monthsInv = map[int]Month{ + 1: January, + 2: February, + 3: March, + 4: April, + 5: May, + 6: June, + 7: July, + 8: August, + 9: September, + 10: October, + 11: November, + 12: December, +} + +// URL represents a valid URL +// +kubebuilder:validation:Pattern=`^https?://.+$` +type URL string diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_from.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_from.go new file mode 100644 index 0000000000..e15d22beab --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_from.go @@ -0,0 +1,689 @@ +// Copyright 2022 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + "encoding/json" + "fmt" + + v1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "sigs.k8s.io/controller-runtime/pkg/conversion" + + "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1" +) + +func convertRouteFrom(in *v1alpha1.Route) (*Route, error) { + if in == nil { + return nil, nil + } + + out := &Route{ + Receiver: in.Receiver, + Continue: in.Continue, + GroupBy: in.GroupBy, + GroupWait: in.GroupWait, + GroupInterval: in.GroupInterval, + RepeatInterval: in.RepeatInterval, + Matchers: convertMatchersFrom(in.Matchers), + MuteTimeIntervals: in.MuteTimeIntervals, + ActiveTimeIntervals: in.ActiveTimeIntervals, + } + + // Deserialize child routes to convert them to v1alpha1 and serialize back. 
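+ // in.Routes carries raw JSON; each entry is decoded into a v1alpha1 Route,
+ // converted recursively, and re-encoded below.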
+ crs, err := in.ChildRoutes() + if err != nil { + return nil, err + } + + out.Routes = make([]apiextensionsv1.JSON, 0, len(in.Routes)) + for i := range crs { + cr, err := convertRouteFrom(&crs[i]) + if err != nil { + return nil, fmt.Errorf("route[%d]: %w", i, err) + } + + b, err := json.Marshal(cr) + if err != nil { + return nil, fmt.Errorf("route[%d]: %w", i, err) + } + + out.Routes = append(out.Routes, apiextensionsv1.JSON{Raw: b}) + } + + return out, nil +} + +func convertMatchersFrom(in []v1alpha1.Matcher) []Matcher { + out := make([]Matcher, 0, len(in)) + + for _, m := range in { + mt := m.MatchType + if mt == "" { + mt = "=" + if m.Regex { + mt = "=~" + } + } + out = append( + out, + Matcher{ + Name: m.Name, + Value: m.Value, + MatchType: MatchType(mt), + }, + ) + } + + return out +} + +func convertTimeIntervalsFrom(in []v1alpha1.TimeInterval) []TimePeriod { + out := make([]TimePeriod, 0, len(in)) + + for _, ti := range in { + var ( + trs = make([]TimeRange, 0, len(ti.Times)) + wds = make([]WeekdayRange, 0, len(ti.Weekdays)) + doms = make([]DayOfMonthRange, 0, len(ti.DaysOfMonth)) + mrs = make([]MonthRange, 0, len(ti.Months)) + yrs = make([]YearRange, 0, len(ti.Years)) + ) + + for _, tr := range ti.Times { + trs = append(trs, TimeRange{StartTime: Time(tr.StartTime), EndTime: Time(tr.EndTime)}) + } + + for _, wd := range ti.Weekdays { + wds = append(wds, WeekdayRange(wd)) + } + + for _, dm := range ti.DaysOfMonth { + doms = append(doms, DayOfMonthRange{Start: dm.Start, End: dm.End}) + } + + for _, mr := range ti.Months { + mrs = append(mrs, MonthRange(mr)) + } + + for _, yr := range ti.Years { + yrs = append(yrs, YearRange(yr)) + } + + out = append( + out, + TimePeriod{ + Times: trs, + Weekdays: wds, + DaysOfMonth: doms, + Months: mrs, + Years: yrs, + }, + ) + } + + return out +} + +func convertHTTPConfigFrom(in *v1alpha1.HTTPConfig) *HTTPConfig { + if in == nil { + return nil + } + + return &HTTPConfig{ + Authorization: in.Authorization, + BasicAuth: in.BasicAuth, + OAuth2: in.OAuth2, + BearerTokenSecret: convertSecretKeySelectorFrom(in.BearerTokenSecret), + TLSConfig: in.TLSConfig, + ProxyURLOriginal: in.ProxyURLOriginal, + ProxyConfig: in.ProxyConfig, + FollowRedirects: in.FollowRedirects, + EnableHTTP2: in.EnableHTTP2, + } +} + +func convertKeyValuesFrom(in []v1alpha1.KeyValue) []KeyValue { + out := make([]KeyValue, len(in)) + + for i := range in { + out[i] = KeyValue{ + Key: in[i].Key, + Value: in[i].Value, + } + } + + return out +} + +func convertSecretKeySelectorFrom(in *v1.SecretKeySelector) *SecretKeySelector { + if in == nil { + return nil + } + + return &SecretKeySelector{ + Name: in.Name, + Key: in.Key, + } +} + +func convertOpsGenieConfigRespondersFrom(in []v1alpha1.OpsGenieConfigResponder) []OpsGenieConfigResponder { + out := make([]OpsGenieConfigResponder, len(in)) + + for i := range in { + out[i] = OpsGenieConfigResponder{ + ID: in[i].ID, + Name: in[i].Name, + Username: in[i].Username, + Type: in[i].Type, + } + } + + return out +} + +func convertOpsGenieConfigFrom(in v1alpha1.OpsGenieConfig) OpsGenieConfig { + return OpsGenieConfig{ + SendResolved: in.SendResolved, + APIKey: convertSecretKeySelectorFrom(in.APIKey), + APIURL: (*URL)(in.APIURL), + Message: in.Message, + Description: in.Description, + Source: in.Source, + Tags: in.Tags, + Note: in.Note, + Priority: in.Priority, + Details: convertKeyValuesFrom(in.Details), + Responders: convertOpsGenieConfigRespondersFrom(in.Responders), + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + Entity: in.Entity, + Actions: 
in.Actions, + } +} + +func convertPagerDutyImageConfigsFrom(in []v1alpha1.PagerDutyImageConfig) []PagerDutyImageConfig { + out := make([]PagerDutyImageConfig, len(in)) + + for i := range in { + out[i] = PagerDutyImageConfig{ + Src: in[i].Src, + Href: in[i].Href, + Alt: in[i].Alt, + } + } + + return out +} + +func convertPagerDutyLinkConfigsFrom(in []v1alpha1.PagerDutyLinkConfig) []PagerDutyLinkConfig { + out := make([]PagerDutyLinkConfig, len(in)) + + for i := range in { + out[i] = PagerDutyLinkConfig{ + Href: (in[i].Href), + Text: in[i].Text, + } + } + + return out +} + +func convertPagerDutyConfigFrom(in v1alpha1.PagerDutyConfig) PagerDutyConfig { + return PagerDutyConfig{ + SendResolved: in.SendResolved, + RoutingKey: convertSecretKeySelectorFrom(in.RoutingKey), + ServiceKey: convertSecretKeySelectorFrom(in.ServiceKey), + URL: (*URL)(in.URL), + Client: in.Client, + ClientURL: (in.ClientURL), + Description: in.Description, + Severity: in.Severity, + Class: in.Class, + Group: in.Group, + Component: in.Component, + Details: convertKeyValuesFrom(in.Details), + PagerDutyImageConfigs: convertPagerDutyImageConfigsFrom(in.PagerDutyImageConfigs), + PagerDutyLinkConfigs: convertPagerDutyLinkConfigsFrom(in.PagerDutyLinkConfigs), + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + Source: in.Source, + Timeout: in.Timeout, + } +} + +func convertDiscordConfigFrom(in v1alpha1.DiscordConfig) DiscordConfig { + return DiscordConfig{ + APIURL: in.APIURL, + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + Title: in.Title, + Message: in.Message, + SendResolved: in.SendResolved, + Content: in.Content, + Username: in.Username, + AvatarURL: (*URL)(in.AvatarURL), + } +} + +func convertRocketChatFieldConfigsFrom(in []v1alpha1.RocketChatFieldConfig) []RocketChatFieldConfig { + if in == nil { + return nil + } + out := make([]RocketChatFieldConfig, len(in)) + for i, field := range in { + out[i] = RocketChatFieldConfig{ + Title: field.Title, + Value: field.Value, + Short: field.Short, + } + } + return out +} + +func convertRocketChatActionConfigsFrom(in []v1alpha1.RocketChatActionConfig) []RocketChatActionConfig { + if in == nil { + return nil + } + out := make([]RocketChatActionConfig, len(in)) + for i, action := range in { + out[i] = RocketChatActionConfig{ + Text: action.Text, + URL: action.URL, + Msg: action.Msg, + } + } + return out +} + +func convertRocketchatConfigFrom(in v1alpha1.RocketChatConfig) RocketChatConfig { + return RocketChatConfig{ + SendResolved: in.SendResolved, + APIURL: (*URL)(in.APIURL), + Channel: in.Channel, + Token: in.Token, + TokenID: in.TokenID, + Color: in.Color, + Emoji: in.Emoji, + IconURL: in.IconURL, + Text: in.Text, + Title: in.Title, + TitleLink: in.TitleLink, + Fields: convertRocketChatFieldConfigsFrom(in.Fields), + ShortFields: in.ShortFields, + ImageURL: in.ImageURL, + ThumbURL: in.ThumbURL, + LinkNames: in.LinkNames, + Actions: convertRocketChatActionConfigsFrom(in.Actions), + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + } +} + +func convertSlackFieldsFrom(in []v1alpha1.SlackField) []SlackField { + out := make([]SlackField, len(in)) + + for i := range in { + out[i] = SlackField{ + Title: in[i].Title, + Value: in[i].Value, + Short: in[i].Short, + } + } + + return out +} + +func convertSlackActionsFrom(in []v1alpha1.SlackAction) []SlackAction { + out := make([]SlackAction, len(in)) + + for i := range in { + out[i] = SlackAction{ + Type: in[i].Type, + Text: in[i].Text, + URL: in[i].URL, + Style: in[i].Style, + Name: in[i].Name, + Value: in[i].Value, + } + if 
in[i].ConfirmField != nil { + out[i].ConfirmField = &SlackConfirmationField{ + Text: in[i].ConfirmField.Text, + Title: in[i].ConfirmField.Title, + OkText: in[i].ConfirmField.OkText, + DismissText: in[i].ConfirmField.DismissText, + } + } + } + + return out +} + +func convertSlackConfigFrom(in v1alpha1.SlackConfig) SlackConfig { + return SlackConfig{ + SendResolved: in.SendResolved, + APIURL: convertSecretKeySelectorFrom(in.APIURL), + Channel: in.Channel, + Username: in.Username, + Color: in.Color, + Title: in.Title, + TitleLink: in.TitleLink, + Pretext: in.Pretext, + Text: in.Text, + Fields: convertSlackFieldsFrom(in.Fields), + ShortFields: in.ShortFields, + Footer: in.Footer, + Fallback: in.Fallback, + CallbackID: in.CallbackID, + IconEmoji: in.IconEmoji, + IconURL: in.IconURL, + ImageURL: in.ImageURL, + ThumbURL: in.ThumbURL, + LinkNames: in.LinkNames, + MrkdwnIn: in.MrkdwnIn, + Actions: convertSlackActionsFrom(in.Actions), + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + Timeout: in.Timeout, + } +} + +func convertWebexConfigFrom(in v1alpha1.WebexConfig) WebexConfig { + return WebexConfig{ + APIURL: (*URL)(in.APIURL), + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + Message: in.Message, + RoomID: in.RoomID, + SendResolved: in.SendResolved, + } +} + +func convertWebhookConfigFrom(in v1alpha1.WebhookConfig) WebhookConfig { + return WebhookConfig{ + SendResolved: in.SendResolved, + URL: in.URL, + URLSecret: convertSecretKeySelectorFrom(in.URLSecret), + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + MaxAlerts: in.MaxAlerts, + Timeout: in.Timeout, + } +} + +func convertWeChatConfigFrom(in v1alpha1.WeChatConfig) WeChatConfig { + return WeChatConfig{ + SendResolved: in.SendResolved, + APISecret: convertSecretKeySelectorFrom(in.APISecret), + APIURL: (*URL)(in.APIURL), + CorpID: in.CorpID, + AgentID: in.AgentID, + ToUser: in.ToUser, + ToParty: in.ToParty, + ToTag: in.ToTag, + Message: in.Message, + MessageType: in.MessageType, + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + } +} + +func convertEmailConfigFrom(in v1alpha1.EmailConfig) EmailConfig { + return EmailConfig{ + SendResolved: in.SendResolved, + To: in.To, + From: in.From, + Hello: in.Hello, + Smarthost: in.Smarthost, + AuthUsername: in.AuthUsername, + AuthPassword: convertSecretKeySelectorFrom(in.AuthPassword), + AuthSecret: convertSecretKeySelectorFrom(in.AuthSecret), + AuthIdentity: in.AuthIdentity, + Headers: convertKeyValuesFrom(in.Headers), + HTML: in.HTML, + Text: in.Text, + RequireTLS: in.RequireTLS, + TLSConfig: in.TLSConfig, + } +} + +func convertVictorOpsConfigFrom(in v1alpha1.VictorOpsConfig) VictorOpsConfig { + return VictorOpsConfig{ + SendResolved: in.SendResolved, + APIKey: convertSecretKeySelectorFrom(in.APIKey), + APIURL: (*URL)(in.APIURL), + RoutingKey: in.RoutingKey, + MessageType: in.MessageType, + EntityDisplayName: in.EntityDisplayName, + StateMessage: in.StateMessage, + MonitoringTool: in.MonitoringTool, + CustomFields: convertKeyValuesFrom(in.CustomFields), + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + } +} + +func convertPushoverConfigFrom(in v1alpha1.PushoverConfig) PushoverConfig { + return PushoverConfig{ + SendResolved: in.SendResolved, + UserKey: convertSecretKeySelectorFrom(in.UserKey), + UserKeyFile: in.UserKeyFile, + Token: convertSecretKeySelectorFrom(in.Token), + TokenFile: in.TokenFile, + Title: in.Title, + Message: in.Message, + URL: in.URL, + URLTitle: in.URLTitle, + Device: in.Device, + Sound: in.Sound, + Priority: in.Priority, + Retry: in.Retry, + Expire: in.Expire, + 
HTML: in.HTML, + Monospace: in.Monospace, + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + } +} + +func convertSNSConfigFrom(in v1alpha1.SNSConfig) SNSConfig { + return SNSConfig{ + SendResolved: in.SendResolved, + ApiURL: in.ApiURL, + Sigv4: in.Sigv4, + TopicARN: in.TopicARN, + Subject: in.Subject, + PhoneNumber: in.PhoneNumber, + TargetARN: in.TargetARN, + Message: in.Message, + Attributes: in.Attributes, + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + } +} + +func convertTelegramConfigFrom(in v1alpha1.TelegramConfig) TelegramConfig { + return TelegramConfig{ + SendResolved: in.SendResolved, + APIURL: (*URL)(in.APIURL), + BotToken: convertSecretKeySelectorFrom(in.BotToken), + BotTokenFile: in.BotTokenFile, + ChatID: in.ChatID, + MessageThreadID: in.MessageThreadID, + Message: in.Message, + DisableNotifications: in.DisableNotifications, + ParseMode: in.ParseMode, + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + } +} + +func convertMSTeamsConfigFrom(in v1alpha1.MSTeamsConfig) MSTeamsConfig { + return MSTeamsConfig{ + SendResolved: in.SendResolved, + WebhookURL: in.WebhookURL, + Title: in.Title, + Summary: in.Summary, + Text: in.Text, + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + } +} + +func convertMSTeamsV2ConfigFrom(in v1alpha1.MSTeamsV2Config) MSTeamsV2Config { + return MSTeamsV2Config{ + SendResolved: in.SendResolved, + WebhookURL: in.WebhookURL, + Title: in.Title, + Text: in.Text, + HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), + } +} + +// ConvertFrom converts from the Hub version (v1alpha1) to this version (v1beta1). +func (dst *AlertmanagerConfig) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*v1alpha1.AlertmanagerConfig) + + dst.ObjectMeta = src.ObjectMeta + + for _, in := range src.Spec.Receivers { + out := Receiver{ + Name: in.Name, + } + + for _, in := range in.OpsGenieConfigs { + out.OpsGenieConfigs = append( + out.OpsGenieConfigs, + convertOpsGenieConfigFrom(in), + ) + } + + for _, in := range in.PagerDutyConfigs { + out.PagerDutyConfigs = append( + out.PagerDutyConfigs, + convertPagerDutyConfigFrom(in), + ) + } + + for _, in := range in.DiscordConfigs { + out.DiscordConfigs = append( + out.DiscordConfigs, + convertDiscordConfigFrom(in), + ) + } + + for _, in := range in.SlackConfigs { + out.SlackConfigs = append( + out.SlackConfigs, + convertSlackConfigFrom(in), + ) + } + + for _, in := range in.WebexConfigs { + out.WebexConfigs = append( + out.WebexConfigs, + convertWebexConfigFrom(in), + ) + } + + for _, in := range in.WebhookConfigs { + out.WebhookConfigs = append( + out.WebhookConfigs, + convertWebhookConfigFrom(in), + ) + } + + for _, in := range in.WeChatConfigs { + out.WeChatConfigs = append( + out.WeChatConfigs, + convertWeChatConfigFrom(in), + ) + } + + for _, in := range in.EmailConfigs { + out.EmailConfigs = append( + out.EmailConfigs, + convertEmailConfigFrom(in), + ) + } + + for _, in := range in.VictorOpsConfigs { + out.VictorOpsConfigs = append( + out.VictorOpsConfigs, + convertVictorOpsConfigFrom(in), + ) + } + + for _, in := range in.PushoverConfigs { + out.PushoverConfigs = append( + out.PushoverConfigs, + convertPushoverConfigFrom(in), + ) + } + + for _, in := range in.SNSConfigs { + out.SNSConfigs = append( + out.SNSConfigs, + convertSNSConfigFrom(in), + ) + } + + for _, in := range in.TelegramConfigs { + out.TelegramConfigs = append( + out.TelegramConfigs, + convertTelegramConfigFrom(in), + ) + } + + for _, in := range in.MSTeamsConfigs { + out.MSTeamsConfigs = append( + out.MSTeamsConfigs, + 
convertMSTeamsConfigFrom(in), + ) + } + + for _, in := range in.MSTeamsV2Configs { + out.MSTeamsV2Configs = append( + out.MSTeamsV2Configs, + convertMSTeamsV2ConfigFrom(in), + ) + } + + for _, in := range in.RocketChatConfigs { + out.RocketChatConfigs = append( + out.RocketChatConfigs, + convertRocketchatConfigFrom(in), + ) + } + + dst.Spec.Receivers = append(dst.Spec.Receivers, out) + } + + for _, in := range src.Spec.InhibitRules { + dst.Spec.InhibitRules = append( + dst.Spec.InhibitRules, + InhibitRule{ + TargetMatch: convertMatchersFrom(in.TargetMatch), + SourceMatch: convertMatchersFrom(in.SourceMatch), + Equal: in.Equal, + }, + ) + } + + for _, in := range src.Spec.MuteTimeIntervals { + dst.Spec.TimeIntervals = append( + dst.Spec.TimeIntervals, + TimeInterval{ + Name: in.Name, + TimeIntervals: convertTimeIntervalsFrom(in.TimeIntervals), + }, + ) + } + + r, err := convertRouteFrom(src.Spec.Route) + if err != nil { + return err + } + dst.Spec.Route = r + + return nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_to.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_to.go new file mode 100644 index 0000000000..474aa4900c --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_to.go @@ -0,0 +1,686 @@ +// Copyright 2022 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + "encoding/json" + "fmt" + + v1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "sigs.k8s.io/controller-runtime/pkg/conversion" + + "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1" +) + +func convertRouteTo(in *Route) (*v1alpha1.Route, error) { + if in == nil { + return nil, nil + } + + out := &v1alpha1.Route{ + Receiver: in.Receiver, + Continue: in.Continue, + GroupBy: in.GroupBy, + GroupWait: in.GroupWait, + GroupInterval: in.GroupInterval, + RepeatInterval: in.RepeatInterval, + Matchers: convertMatchersTo(in.Matchers), + MuteTimeIntervals: in.MuteTimeIntervals, + ActiveTimeIntervals: in.ActiveTimeIntervals, + } + + // Deserialize child routes to convert them to v1alpha1 and serialize back. 
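+ // ChildRoutes() unpacks the raw JSON children into typed routes; arbitrarily deep nesting is handled by the recursive convertRouteTo call below.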
+ crs, err := in.ChildRoutes() + if err != nil { + return nil, err + } + + out.Routes = make([]apiextensionsv1.JSON, 0, len(in.Routes)) + for i := range crs { + cr, err := convertRouteTo(&crs[i]) + if err != nil { + return nil, fmt.Errorf("route[%d]: %w", i, err) + } + + b, err := json.Marshal(cr) + if err != nil { + return nil, fmt.Errorf("route[%d]: %w", i, err) + } + + out.Routes = append(out.Routes, apiextensionsv1.JSON{Raw: b}) + } + + return out, nil +} + +func convertMatchersTo(in []Matcher) []v1alpha1.Matcher { + out := make([]v1alpha1.Matcher, 0, len(in)) + + for _, m := range in { + out = append( + out, + v1alpha1.Matcher{ + Name: m.Name, + Value: m.Value, + MatchType: v1alpha1.MatchType(m.MatchType), + }, + ) + } + + return out +} + +func convertTimeIntervalsTo(in []TimePeriod) []v1alpha1.TimeInterval { + out := make([]v1alpha1.TimeInterval, 0, len(in)) + + for _, ti := range in { + var ( + trs = make([]v1alpha1.TimeRange, 0, len(ti.Times)) + wds = make([]v1alpha1.WeekdayRange, 0, len(ti.Weekdays)) + doms = make([]v1alpha1.DayOfMonthRange, 0, len(ti.DaysOfMonth)) + mrs = make([]v1alpha1.MonthRange, 0, len(ti.Months)) + yrs = make([]v1alpha1.YearRange, 0, len(ti.Years)) + ) + + for _, tr := range ti.Times { + trs = append(trs, v1alpha1.TimeRange{StartTime: v1alpha1.Time(tr.StartTime), EndTime: v1alpha1.Time(tr.EndTime)}) + } + + for _, wd := range ti.Weekdays { + wds = append(wds, v1alpha1.WeekdayRange(wd)) + } + + for _, dm := range ti.DaysOfMonth { + doms = append(doms, v1alpha1.DayOfMonthRange{Start: dm.Start, End: dm.End}) + } + + for _, mr := range ti.Months { + mrs = append(mrs, v1alpha1.MonthRange(mr)) + } + + for _, yr := range ti.Years { + yrs = append(yrs, v1alpha1.YearRange(yr)) + } + + out = append( + out, + v1alpha1.TimeInterval{ + Times: trs, + Weekdays: wds, + DaysOfMonth: doms, + Months: mrs, + Years: yrs, + }, + ) + } + + return out +} + +func convertHTTPConfigTo(in *HTTPConfig) *v1alpha1.HTTPConfig { + if in == nil { + return nil + } + + return &v1alpha1.HTTPConfig{ + Authorization: in.Authorization, + BasicAuth: in.BasicAuth, + OAuth2: in.OAuth2, + BearerTokenSecret: convertSecretKeySelectorTo(in.BearerTokenSecret), + TLSConfig: in.TLSConfig, + ProxyURLOriginal: in.ProxyURLOriginal, + ProxyConfig: in.ProxyConfig, + FollowRedirects: in.FollowRedirects, + EnableHTTP2: in.EnableHTTP2, + } +} + +func convertKeyValuesTo(in []KeyValue) []v1alpha1.KeyValue { + out := make([]v1alpha1.KeyValue, len(in)) + + for i := range in { + out[i] = v1alpha1.KeyValue{ + Key: in[i].Key, + Value: in[i].Value, + } + } + + return out + +} + +func convertSecretKeySelectorTo(in *SecretKeySelector) *v1.SecretKeySelector { + if in == nil { + return nil + } + + return &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: in.Name, + }, + Key: in.Key, + } +} + +func convertOpsGenieConfigRespondersTo(in []OpsGenieConfigResponder) []v1alpha1.OpsGenieConfigResponder { + out := make([]v1alpha1.OpsGenieConfigResponder, len(in)) + + for i := range in { + out[i] = v1alpha1.OpsGenieConfigResponder{ + ID: in[i].ID, + Name: in[i].Name, + Username: in[i].Username, + Type: in[i].Type, + } + } + + return out +} + +func convertOpsGenieConfigTo(in OpsGenieConfig) v1alpha1.OpsGenieConfig { + return v1alpha1.OpsGenieConfig{ + SendResolved: in.SendResolved, + APIKey: convertSecretKeySelectorTo(in.APIKey), + APIURL: (*v1alpha1.URL)(in.APIURL), + Message: in.Message, + Description: in.Description, + Source: in.Source, + Tags: in.Tags, + Note: in.Note, + Priority: in.Priority, + Details: 
convertKeyValuesTo(in.Details), + Responders: convertOpsGenieConfigRespondersTo(in.Responders), + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + Entity: in.Entity, + Actions: in.Actions, + } +} + +func convertPagerDutyImageConfigsTo(in []PagerDutyImageConfig) []v1alpha1.PagerDutyImageConfig { + out := make([]v1alpha1.PagerDutyImageConfig, len(in)) + + for i := range in { + out[i] = v1alpha1.PagerDutyImageConfig{ + Src: in[i].Src, + Href: in[i].Href, + Alt: in[i].Alt, + } + } + + return out +} + +func convertPagerDutyLinkConfigsTo(in []PagerDutyLinkConfig) []v1alpha1.PagerDutyLinkConfig { + out := make([]v1alpha1.PagerDutyLinkConfig, len(in)) + + for i := range in { + out[i] = v1alpha1.PagerDutyLinkConfig{ + Href: in[i].Href, + Text: in[i].Text, + } + } + + return out +} + +func convertPagerDutyConfigTo(in PagerDutyConfig) v1alpha1.PagerDutyConfig { + return v1alpha1.PagerDutyConfig{ + SendResolved: in.SendResolved, + RoutingKey: convertSecretKeySelectorTo(in.RoutingKey), + ServiceKey: convertSecretKeySelectorTo(in.ServiceKey), + URL: (*v1alpha1.URL)(in.URL), + Client: in.Client, + ClientURL: in.ClientURL, + Description: in.Description, + Severity: in.Severity, + Class: in.Class, + Group: in.Group, + Component: in.Component, + Details: convertKeyValuesTo(in.Details), + PagerDutyImageConfigs: convertPagerDutyImageConfigsTo(in.PagerDutyImageConfigs), + PagerDutyLinkConfigs: convertPagerDutyLinkConfigsTo(in.PagerDutyLinkConfigs), + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + Source: in.Source, + Timeout: in.Timeout, + } +} + +func convertDiscordConfigTo(in DiscordConfig) v1alpha1.DiscordConfig { + return v1alpha1.DiscordConfig{ + APIURL: in.APIURL, + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + Title: in.Title, + Message: in.Message, + SendResolved: in.SendResolved, + Content: in.Content, + Username: in.Username, + AvatarURL: (*v1alpha1.URL)(in.AvatarURL), + } +} + +func convertRocketChatFieldConfigsTo(in []RocketChatFieldConfig) []v1alpha1.RocketChatFieldConfig { + if in == nil { + return nil + } + out := make([]v1alpha1.RocketChatFieldConfig, len(in)) + for i, field := range in { + out[i] = v1alpha1.RocketChatFieldConfig{ + Title: field.Title, + Value: field.Value, + Short: field.Short, + } + } + return out +} + +func convertRocketChatActionConfigsTo(in []RocketChatActionConfig) []v1alpha1.RocketChatActionConfig { + if in == nil { + return nil + } + out := make([]v1alpha1.RocketChatActionConfig, len(in)) + for i, action := range in { + out[i] = v1alpha1.RocketChatActionConfig{ + Text: action.Text, + URL: action.URL, + Msg: action.Msg, + } + } + return out +} + +func convertRocketchatConfigTo(in RocketChatConfig) v1alpha1.RocketChatConfig { + return v1alpha1.RocketChatConfig{ + SendResolved: in.SendResolved, + APIURL: (*v1alpha1.URL)(in.APIURL), + Channel: in.Channel, + Token: in.Token, + TokenID: in.TokenID, + Color: in.Color, + Emoji: in.Emoji, + IconURL: in.IconURL, + Text: in.Text, + Title: in.Title, + TitleLink: in.TitleLink, + Fields: convertRocketChatFieldConfigsTo(in.Fields), + ShortFields: in.ShortFields, + ImageURL: in.ImageURL, + ThumbURL: in.ThumbURL, + LinkNames: in.LinkNames, + Actions: convertRocketChatActionConfigsTo(in.Actions), + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + } +} + +func convertSlackFieldsTo(in []SlackField) []v1alpha1.SlackField { + out := make([]v1alpha1.SlackField, len(in)) + + for i := range in { + out[i] = v1alpha1.SlackField{ + Title: in[i].Title, + Value: in[i].Value, + Short: in[i].Short, + } + } + + return out +} + +func 
convertSlackActionsTo(in []SlackAction) []v1alpha1.SlackAction { + out := make([]v1alpha1.SlackAction, len(in)) + + for i := range in { + out[i] = v1alpha1.SlackAction{ + Type: in[i].Type, + Text: in[i].Text, + URL: in[i].URL, + Style: in[i].Style, + Name: in[i].Name, + Value: in[i].Value, + } + if in[i].ConfirmField != nil { + out[i].ConfirmField = &v1alpha1.SlackConfirmationField{ + Text: in[i].ConfirmField.Text, + Title: in[i].ConfirmField.Title, + OkText: in[i].ConfirmField.OkText, + DismissText: in[i].ConfirmField.DismissText, + } + } + } + + return out +} + +func convertSlackConfigTo(in SlackConfig) v1alpha1.SlackConfig { + return v1alpha1.SlackConfig{ + SendResolved: in.SendResolved, + APIURL: convertSecretKeySelectorTo(in.APIURL), + Channel: in.Channel, + Username: in.Username, + Color: in.Color, + Title: in.Title, + TitleLink: in.TitleLink, + Pretext: in.Pretext, + Text: in.Text, + Fields: convertSlackFieldsTo(in.Fields), + ShortFields: in.ShortFields, + Footer: in.Footer, + Fallback: in.Fallback, + CallbackID: in.CallbackID, + IconEmoji: in.IconEmoji, + IconURL: in.IconURL, + ImageURL: in.ImageURL, + ThumbURL: in.ThumbURL, + LinkNames: in.LinkNames, + MrkdwnIn: in.MrkdwnIn, + Actions: convertSlackActionsTo(in.Actions), + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + Timeout: in.Timeout, + } +} + +func convertWebexConfigTo(in WebexConfig) v1alpha1.WebexConfig { + return v1alpha1.WebexConfig{ + APIURL: (*v1alpha1.URL)(in.APIURL), + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + Message: in.Message, + RoomID: in.RoomID, + SendResolved: in.SendResolved, + } +} + +func convertWebhookConfigTo(in WebhookConfig) v1alpha1.WebhookConfig { + return v1alpha1.WebhookConfig{ + SendResolved: in.SendResolved, + URL: in.URL, + URLSecret: convertSecretKeySelectorTo(in.URLSecret), + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + MaxAlerts: in.MaxAlerts, + Timeout: in.Timeout, + } +} + +func convertWeChatConfigTo(in WeChatConfig) v1alpha1.WeChatConfig { + return v1alpha1.WeChatConfig{ + SendResolved: in.SendResolved, + APISecret: convertSecretKeySelectorTo(in.APISecret), + APIURL: (*v1alpha1.URL)(in.APIURL), + CorpID: in.CorpID, + AgentID: in.AgentID, + ToUser: in.ToUser, + ToParty: in.ToParty, + ToTag: in.ToTag, + Message: in.Message, + MessageType: in.MessageType, + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + } +} + +func convertEmailConfigTo(in EmailConfig) v1alpha1.EmailConfig { + return v1alpha1.EmailConfig{ + SendResolved: in.SendResolved, + To: in.To, + From: in.From, + Hello: in.Hello, + Smarthost: in.Smarthost, + AuthUsername: in.AuthUsername, + AuthPassword: convertSecretKeySelectorTo(in.AuthPassword), + AuthSecret: convertSecretKeySelectorTo(in.AuthSecret), + AuthIdentity: in.AuthIdentity, + Headers: convertKeyValuesTo(in.Headers), + HTML: in.HTML, + Text: in.Text, + RequireTLS: in.RequireTLS, + TLSConfig: in.TLSConfig, + } +} + +func convertVictorOpsConfigTo(in VictorOpsConfig) v1alpha1.VictorOpsConfig { + return v1alpha1.VictorOpsConfig{ + SendResolved: in.SendResolved, + APIKey: convertSecretKeySelectorTo(in.APIKey), + APIURL: (*v1alpha1.URL)(in.APIURL), + RoutingKey: in.RoutingKey, + MessageType: in.MessageType, + EntityDisplayName: in.EntityDisplayName, + StateMessage: in.StateMessage, + MonitoringTool: in.MonitoringTool, + CustomFields: convertKeyValuesTo(in.CustomFields), + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + } +} + +func convertPushoverConfigTo(in PushoverConfig) v1alpha1.PushoverConfig { + return v1alpha1.PushoverConfig{ + SendResolved: 
in.SendResolved, + UserKey: convertSecretKeySelectorTo(in.UserKey), + UserKeyFile: in.UserKeyFile, + Token: convertSecretKeySelectorTo(in.Token), + TokenFile: in.TokenFile, + Title: in.Title, + Message: in.Message, + URL: in.URL, + URLTitle: in.URLTitle, + Device: in.Device, + Sound: in.Sound, + Priority: in.Priority, + Retry: in.Retry, + Expire: in.Expire, + HTML: in.HTML, + Monospace: in.Monospace, + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + } +} + +func convertSNSConfigTo(in SNSConfig) v1alpha1.SNSConfig { + return v1alpha1.SNSConfig{ + SendResolved: in.SendResolved, + ApiURL: in.ApiURL, + Sigv4: in.Sigv4, + TopicARN: in.TopicARN, + Subject: in.Subject, + PhoneNumber: in.PhoneNumber, + TargetARN: in.TargetARN, + Message: in.Message, + Attributes: in.Attributes, + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + } +} + +func convertTelegramConfigTo(in TelegramConfig) v1alpha1.TelegramConfig { + return v1alpha1.TelegramConfig{ + SendResolved: in.SendResolved, + APIURL: (*v1alpha1.URL)(in.APIURL), + BotToken: convertSecretKeySelectorTo(in.BotToken), + BotTokenFile: in.BotTokenFile, + ChatID: in.ChatID, + MessageThreadID: in.MessageThreadID, + Message: in.Message, + DisableNotifications: in.DisableNotifications, + ParseMode: in.ParseMode, + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + } +} + +func convertMSTeamsConfigTo(in MSTeamsConfig) v1alpha1.MSTeamsConfig { + return v1alpha1.MSTeamsConfig{ + SendResolved: in.SendResolved, + WebhookURL: in.WebhookURL, + Title: in.Title, + Text: in.Text, + Summary: in.Summary, + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + } +} + +func convertMSTeamsV2ConfigTo(in MSTeamsV2Config) v1alpha1.MSTeamsV2Config { + return v1alpha1.MSTeamsV2Config{ + SendResolved: in.SendResolved, + WebhookURL: in.WebhookURL, + Title: in.Title, + Text: in.Text, + HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), + } +} + +// ConvertTo converts from this version (v1beta1) to the Hub version (v1alpha1). 
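+// Together with ConvertFrom, this method satisfies controller-runtime's conversion.Convertible interface, with v1alpha1 acting as the Hub version.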
+func (src *AlertmanagerConfig) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v1alpha1.AlertmanagerConfig) + + dst.ObjectMeta = src.ObjectMeta + + for _, in := range src.Spec.Receivers { + out := v1alpha1.Receiver{ + Name: in.Name, + } + + for _, in := range in.OpsGenieConfigs { + out.OpsGenieConfigs = append( + out.OpsGenieConfigs, + convertOpsGenieConfigTo(in), + ) + } + + for _, in := range in.PagerDutyConfigs { + out.PagerDutyConfigs = append( + out.PagerDutyConfigs, + convertPagerDutyConfigTo(in), + ) + } + + for _, in := range in.DiscordConfigs { + out.DiscordConfigs = append( + out.DiscordConfigs, + convertDiscordConfigTo(in), + ) + } + + for _, in := range in.SlackConfigs { + out.SlackConfigs = append( + out.SlackConfigs, + convertSlackConfigTo(in), + ) + } + + for _, in := range in.WebexConfigs { + out.WebexConfigs = append( + out.WebexConfigs, + convertWebexConfigTo(in), + ) + } + + for _, in := range in.WebhookConfigs { + out.WebhookConfigs = append( + out.WebhookConfigs, + convertWebhookConfigTo(in), + ) + } + + for _, in := range in.WeChatConfigs { + out.WeChatConfigs = append( + out.WeChatConfigs, + convertWeChatConfigTo(in), + ) + } + + for _, in := range in.EmailConfigs { + out.EmailConfigs = append( + out.EmailConfigs, + convertEmailConfigTo(in), + ) + } + + for _, in := range in.VictorOpsConfigs { + out.VictorOpsConfigs = append( + out.VictorOpsConfigs, + convertVictorOpsConfigTo(in), + ) + } + + for _, in := range in.PushoverConfigs { + out.PushoverConfigs = append( + out.PushoverConfigs, + convertPushoverConfigTo(in), + ) + } + + for _, in := range in.SNSConfigs { + out.SNSConfigs = append( + out.SNSConfigs, + convertSNSConfigTo(in), + ) + } + + for _, in := range in.TelegramConfigs { + out.TelegramConfigs = append( + out.TelegramConfigs, + convertTelegramConfigTo(in), + ) + } + + for _, in := range in.MSTeamsConfigs { + out.MSTeamsConfigs = append( + out.MSTeamsConfigs, + convertMSTeamsConfigTo(in), + ) + } + + for _, in := range in.MSTeamsV2Configs { + out.MSTeamsV2Configs = append( + out.MSTeamsV2Configs, + convertMSTeamsV2ConfigTo(in), + ) + } + + for _, in := range in.RocketChatConfigs { + out.RocketChatConfigs = append( + out.RocketChatConfigs, + convertRocketchatConfigTo(in), + ) + } + + dst.Spec.Receivers = append(dst.Spec.Receivers, out) + } + + for _, in := range src.Spec.InhibitRules { + dst.Spec.InhibitRules = append( + dst.Spec.InhibitRules, + v1alpha1.InhibitRule{ + TargetMatch: convertMatchersTo(in.TargetMatch), + SourceMatch: convertMatchersTo(in.SourceMatch), + Equal: in.Equal, + }, + ) + + } + + for _, in := range src.Spec.TimeIntervals { + dst.Spec.MuteTimeIntervals = append( + dst.Spec.MuteTimeIntervals, + v1alpha1.MuteTimeInterval{ + Name: in.Name, + TimeIntervals: convertTimeIntervalsTo(in.TimeIntervals), + }, + ) + } + + r, err := convertRouteTo(src.Spec.Route) + if err != nil { + return err + } + dst.Spec.Route = r + + return nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/doc.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/doc.go new file mode 100644 index 0000000000..5b75b14217 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/doc.go @@ -0,0 +1,18 @@ +// Copyright 2020 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package +// +groupName=monitoring.coreos.com + +package v1beta1 diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/register.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/register.go new file mode 100644 index 0000000000..96e40e832c --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/register.go @@ -0,0 +1,55 @@ +// Copyright 2020 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" +) + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: monitoring.GroupName, Version: Version} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AlertmanagerConfig{}, + &AlertmanagerConfigList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/validation.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/validation.go new file mode 100644 index 0000000000..856566122b --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/validation.go @@ -0,0 +1,353 @@ +// Copyright 2021 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +func (hc *HTTPConfig) Validate() error { + if hc == nil { + return nil + } + + if (hc.BasicAuth != nil || hc.OAuth2 != nil) && (hc.BearerTokenSecret != nil) { + return fmt.Errorf("at most one of basicAuth, oauth2, bearerTokenSecret must be configured") + } + + if hc.Authorization != nil { + if hc.BearerTokenSecret != nil { + return fmt.Errorf("authorization is not compatible with bearerTokenSecret") + } + + if hc.BasicAuth != nil || hc.OAuth2 != nil { + return fmt.Errorf("at most one of basicAuth, oauth2 & authorization must be configured") + } + + if err := hc.Authorization.Validate(); err != nil { + return err + } + } + + if hc.OAuth2 != nil { + if hc.BasicAuth != nil { + return fmt.Errorf("at most one of basicAuth, oauth2 & authorization must be configured") + } + + if err := hc.OAuth2.Validate(); err != nil { + return err + } + } + + if hc.TLSConfig != nil { + if err := hc.TLSConfig.Validate(); err != nil { + return err + } + } + + if err := hc.ProxyConfig.Validate(); err != nil { + return err + } + return nil +} + +// Validate the TimeInterval +func (ti TimeInterval) Validate() error { + if ti.Name == "" { + return errors.New("empty name field for time interval") + } + + for i, ti := range ti.TimeIntervals { + for _, time := range ti.Times { + if err := time.Validate(); err != nil { + return fmt.Errorf("time range at %d is invalid: %w", i, err) + } + } + for _, weekday := range ti.Weekdays { + if err := weekday.Validate(); err != nil { + return fmt.Errorf("weekday range at %d is invalid: %w", i, err) + } + } + for _, dom := range ti.DaysOfMonth { + if err := dom.Validate(); err != nil { + return fmt.Errorf("day of month range at %d is invalid: %w", i, err) + } + } + for _, month := range ti.Months { + if err := month.Validate(); err != nil { + return fmt.Errorf("month range at %d is invalid: %w", i, err) + } + } + for _, year := range ti.Years { + if err := year.Validate(); err != nil { + return fmt.Errorf("year range at %d is invalid: %w", i, err) + } + } + } + return nil +} + +// Validate the TimeRange +func (tr TimeRange) Validate() error { + _, err := tr.Parse() + return err +} + +// Parse returns a ParsedRange on valid input or an error if the fields cannot be parsed +// End of the day is represented as 1440. 
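+// For example, a range with StartTime "09:00" and EndTime "17:00" parses to Start=540 and End=1020 (minutes since midnight).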
+func (tr TimeRange) Parse() (*ParsedRange, error) { + if tr.StartTime == "" || tr.EndTime == "" { + return nil, fmt.Errorf("start and end are required") + } + + start, err := parseTime(string(tr.StartTime)) + if err != nil { + return nil, fmt.Errorf("start time invalid: %w", err) + } + + end, err := parseTime(string(tr.EndTime)) + if err != nil { + return nil, fmt.Errorf("end time invalid: %w", err) + } + + if start >= end { + return nil, fmt.Errorf("start time %d cannot be equal or greater than end time %d", start, end) + } + return &ParsedRange{ + Start: start, + End: end, + }, nil +} + +// Validate the WeekdayRange +func (wr WeekdayRange) Validate() error { + _, err := wr.Parse() + return err +} + +// Parse returns a ParsedRange on valid input or an error if the fields cannot be parsed +// The week starts on Sunday -> 0 +func (wr WeekdayRange) Parse() (*ParsedRange, error) { + startStr, endStr, err := parseRange(string(wr)) + if err != nil { + return nil, err + } + + start, err := Weekday(startStr).Int() + if err != nil { + return nil, fmt.Errorf("failed to parse start day from weekday range: %w", err) + } + + end, err := Weekday(endStr).Int() + if err != nil { + return nil, fmt.Errorf("failed to parse end day from weekday range: %w", err) + } + + if start > end { + return nil, errors.New("start day cannot be before end day") + } + if start < 0 || start > 6 { + return nil, fmt.Errorf("%s is not a valid day of the week: out of range", startStr) + } + if end < 0 || end > 6 { + return nil, fmt.Errorf("%s is not a valid day of the week: out of range", endStr) + } + return &ParsedRange{Start: start, End: end}, nil +} + +// Validate the YearRange +func (yr YearRange) Validate() error { + _, err := yr.Parse() + return err +} + +// Parse returns a ParsedRange on valid input or an error if the fields cannot be parsed +func (yr YearRange) Parse() (*ParsedRange, error) { + startStr, endStr, err := parseRange(string(yr)) + if err != nil { + return nil, err + } + + start, err := strconv.Atoi(startStr) + if err != nil { + return nil, fmt.Errorf("start year cannot be %s parsed: %w", startStr, err) + } + + end, err := strconv.Atoi(endStr) + if err != nil { + return nil, fmt.Errorf("end year cannot be %s parsed: %w", endStr, err) + } + + if start > end { + return nil, fmt.Errorf("end year %d is before start year %d", end, start) + } + return &ParsedRange{Start: start, End: end}, nil +} + +// Int returns an integer, which is the canonical representation +// of the Weekday in upstream types. +// Returns an error if the Weekday is invalid +func (w Weekday) Int() (int, error) { + normaliseWeekday := Weekday(strings.ToLower(string(w))) + + day, found := daysOfWeek[normaliseWeekday] + if !found { + i, err := strconv.Atoi(string(normaliseWeekday)) + if err != nil { + return day, fmt.Errorf("%s is an invalid weekday", w) + } + day = i + } + + return day, nil +} + +// Int validates the Month and returns an integer, which is the canonical representation +// of the Month in upstream types. 
+// Returns an error if the Month is invalid +func (m Month) Int() (int, error) { + normaliseMonth := Month(strings.ToLower(string(m))) + + month, found := months[normaliseMonth] + if !found { + i, err := strconv.Atoi(string(normaliseMonth)) + if err != nil || i < 1 || i > 12 { + return month, fmt.Errorf("%s is an invalid month", m) + } + month = i + } + + return month, nil +} + +// Validate the DayOfMonthRange +func (r DayOfMonthRange) Validate() error { + // Note: Validation is copied from UnmarshalYAML for DayOfMonthRange in alertmanager repo + + // Check beginning <= end accounting for negatives day of month indices as well. + // Months != 31 days can't be addressed here and are clamped, but at least we can catch blatant errors. + if r.Start == 0 || r.Start < -31 || r.Start > 31 { + return fmt.Errorf("%d is not a valid day of the month: out of range", r.Start) + } + if r.End == 0 || r.End < -31 || r.End > 31 { + return fmt.Errorf("%d is not a valid day of the month: out of range", r.End) + } + // Restricting here prevents errors where begin > end in longer months but not shorter months. + if r.Start < 0 && r.End > 0 { + return fmt.Errorf("end day must be negative if start day is negative") + } + // Check begin <= end. We can't know this for sure when using negative indices, + // but we can prevent cases where its always invalid (using 28 day minimum length). + checkBegin := r.Start + checkEnd := r.End + if r.Start < 0 { + checkBegin = 28 + r.Start + } + if r.End < 0 { + checkEnd = 28 + r.End + } + if checkBegin > checkEnd { + return fmt.Errorf("end day %d is always before start day %d", r.End, r.Start) + } + return nil +} + +// Validate the month range +func (mr MonthRange) Validate() error { + _, err := mr.Parse() + return err +} + +// Parse returns a ParsedMonthRange or error on invalid input +func (mr MonthRange) Parse() (*ParsedRange, error) { + startStr, endStr, err := parseRange(string(mr)) + if err != nil { + return nil, err + } + + start, err := Month(startStr).Int() + if err != nil { + return nil, fmt.Errorf("failed to parse start month from month range: %w", err) + } + + end, err := Month(endStr).Int() + if err != nil { + return nil, fmt.Errorf("failed to parse start month from month range: %w", err) + } + + if start > end { + return nil, fmt.Errorf("end month %s is before start month %s", endStr, startStr) + } + return &ParsedRange{ + Start: start, + End: end, + }, nil +} + +// ParsedRange is an integer representation of a range +// +kubebuilder:object:generate:=false +type ParsedRange struct { + // start defines the beginning of the range + // +optional + Start int `json:"start,omitempty"` + // end defines the end of the range + // +optional + End int `json:"end,omitempty"` +} + +var validTime = "^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$)" +var validTimeRE = regexp.MustCompile(validTime) + +// Converts a string of the form "HH:MM" into the number of minutes elapsed in the day. 
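+// For example, "14:30" yields 870 (14*60 + 30); the special value "24:00" yields 1440.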
+func parseTime(in string) (mins int, err error) { + if !validTimeRE.MatchString(in) { + return 0, fmt.Errorf("couldn't parse timestamp %s, invalid format", in) + } + timestampComponents := strings.Split(in, ":") + if len(timestampComponents) != 2 { + return 0, fmt.Errorf("invalid timestamp format: %s", in) + } + timeStampHours, err := strconv.Atoi(timestampComponents[0]) + if err != nil { + return 0, err + } + timeStampMinutes, err := strconv.Atoi(timestampComponents[1]) + if err != nil { + return 0, err + } + if timeStampHours < 0 || timeStampHours > 24 || timeStampMinutes < 0 || timeStampMinutes > 60 { + return 0, fmt.Errorf("timestamp %s out of range", in) + } + // Timestamps are stored as minutes elapsed in the day, so multiply hours by 60. + mins = timeStampHours*60 + timeStampMinutes + return mins, nil +} + +// parseRange parses a valid range string into parts +func parseRange(in string) (start, end string, err error) { + if !strings.ContainsRune(in, ':') { + return in, in, nil + } + + parts := strings.Split(string(in), ":") + if len(parts) != 2 { + return start, end, fmt.Errorf("invalid range provided %s", in) + } + return parts[0], parts[1], nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..6b80e2ca45 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,1617 @@ +//go:build !ignore_autogenerated + +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerConfig) DeepCopyInto(out *AlertmanagerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerConfig. +func (in *AlertmanagerConfig) DeepCopy() *AlertmanagerConfig { + if in == nil { + return nil + } + out := new(AlertmanagerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlertmanagerConfigList) DeepCopyInto(out *AlertmanagerConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AlertmanagerConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerConfigList. +func (in *AlertmanagerConfigList) DeepCopy() *AlertmanagerConfigList { + if in == nil { + return nil + } + out := new(AlertmanagerConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerConfigSpec) DeepCopyInto(out *AlertmanagerConfigSpec) { + *out = *in + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = new(Route) + (*in).DeepCopyInto(*out) + } + if in.Receivers != nil { + in, out := &in.Receivers, &out.Receivers + *out = make([]Receiver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InhibitRules != nil { + in, out := &in.InhibitRules, &out.InhibitRules + *out = make([]InhibitRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeIntervals != nil { + in, out := &in.TimeIntervals, &out.TimeIntervals + *out = make([]TimeInterval, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerConfigSpec. +func (in *AlertmanagerConfigSpec) DeepCopy() *AlertmanagerConfigSpec { + if in == nil { + return nil + } + out := new(AlertmanagerConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DayOfMonthRange) DeepCopyInto(out *DayOfMonthRange) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DayOfMonthRange. +func (in *DayOfMonthRange) DeepCopy() *DayOfMonthRange { + if in == nil { + return nil + } + out := new(DayOfMonthRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiscordConfig) DeepCopyInto(out *DiscordConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + in.APIURL.DeepCopyInto(&out.APIURL) + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.AvatarURL != nil { + in, out := &in.AvatarURL, &out.AvatarURL + *out = new(URL) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiscordConfig. 
+func (in *DiscordConfig) DeepCopy() *DiscordConfig { + if in == nil { + return nil + } + out := new(DiscordConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailConfig) DeepCopyInto(out *EmailConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.AuthPassword != nil { + in, out := &in.AuthPassword, &out.AuthPassword + *out = new(SecretKeySelector) + **out = **in + } + if in.AuthSecret != nil { + in, out := &in.AuthSecret, &out.AuthSecret + *out = new(SecretKeySelector) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]KeyValue, len(*in)) + copy(*out, *in) + } + if in.HTML != nil { + in, out := &in.HTML, &out.HTML + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } + if in.RequireTLS != nil { + in, out := &in.RequireTLS, &out.RequireTLS + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailConfig. +func (in *EmailConfig) DeepCopy() *EmailConfig { + if in == nil { + return nil + } + out := new(EmailConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfig) DeepCopyInto(out *HTTPConfig) { + *out = *in + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + if in.BearerTokenSecret != nil { + in, out := &in.BearerTokenSecret, &out.BearerTokenSecret + *out = new(SecretKeySelector) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.ProxyURLOriginal != nil { + in, out := &in.ProxyURLOriginal, &out.ProxyURLOriginal + *out = new(string) + **out = **in + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfig. +func (in *HTTPConfig) DeepCopy() *HTTPConfig { + if in == nil { + return nil + } + out := new(HTTPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InhibitRule) DeepCopyInto(out *InhibitRule) { + *out = *in + if in.TargetMatch != nil { + in, out := &in.TargetMatch, &out.TargetMatch + *out = make([]Matcher, len(*in)) + copy(*out, *in) + } + if in.SourceMatch != nil { + in, out := &in.SourceMatch, &out.SourceMatch + *out = make([]Matcher, len(*in)) + copy(*out, *in) + } + if in.Equal != nil { + in, out := &in.Equal, &out.Equal + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InhibitRule. +func (in *InhibitRule) DeepCopy() *InhibitRule { + if in == nil { + return nil + } + out := new(InhibitRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyValue) DeepCopyInto(out *KeyValue) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyValue. +func (in *KeyValue) DeepCopy() *KeyValue { + if in == nil { + return nil + } + out := new(KeyValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSTeamsConfig) DeepCopyInto(out *MSTeamsConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + in.WebhookURL.DeepCopyInto(&out.WebhookURL) + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSTeamsConfig. +func (in *MSTeamsConfig) DeepCopy() *MSTeamsConfig { + if in == nil { + return nil + } + out := new(MSTeamsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSTeamsV2Config) DeepCopyInto(out *MSTeamsV2Config) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.WebhookURL != nil { + in, out := &in.WebhookURL, &out.WebhookURL + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSTeamsV2Config. +func (in *MSTeamsV2Config) DeepCopy() *MSTeamsV2Config { + if in == nil { + return nil + } + out := new(MSTeamsV2Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Matcher) DeepCopyInto(out *Matcher) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Matcher. 
+func (in *Matcher) DeepCopy() *Matcher { + if in == nil { + return nil + } + out := new(Matcher) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpsGenieConfig) DeepCopyInto(out *OpsGenieConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APIKey != nil { + in, out := &in.APIKey, &out.APIKey + *out = new(SecretKeySelector) + **out = **in + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = make([]KeyValue, len(*in)) + copy(*out, *in) + } + if in.Responders != nil { + in, out := &in.Responders, &out.Responders + *out = make([]OpsGenieConfigResponder, len(*in)) + copy(*out, *in) + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpsGenieConfig. +func (in *OpsGenieConfig) DeepCopy() *OpsGenieConfig { + if in == nil { + return nil + } + out := new(OpsGenieConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpsGenieConfigResponder) DeepCopyInto(out *OpsGenieConfigResponder) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpsGenieConfigResponder. +func (in *OpsGenieConfigResponder) DeepCopy() *OpsGenieConfigResponder { + if in == nil { + return nil + } + out := new(OpsGenieConfigResponder) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PagerDutyConfig) DeepCopyInto(out *PagerDutyConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.RoutingKey != nil { + in, out := &in.RoutingKey, &out.RoutingKey + *out = new(SecretKeySelector) + **out = **in + } + if in.ServiceKey != nil { + in, out := &in.ServiceKey, &out.ServiceKey + *out = new(SecretKeySelector) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(URL) + **out = **in + } + if in.Client != nil { + in, out := &in.Client, &out.Client + *out = new(string) + **out = **in + } + if in.ClientURL != nil { + in, out := &in.ClientURL, &out.ClientURL + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(string) + **out = **in + } + if in.Class != nil { + in, out := &in.Class, &out.Class + *out = new(string) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = new(string) + **out = **in + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = make([]KeyValue, len(*in)) + copy(*out, *in) + } + if in.PagerDutyImageConfigs != nil { + in, out := &in.PagerDutyImageConfigs, &out.PagerDutyImageConfigs + *out = make([]PagerDutyImageConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PagerDutyLinkConfigs != nil { + in, out := &in.PagerDutyLinkConfigs, &out.PagerDutyLinkConfigs + *out = make([]PagerDutyLinkConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(monitoringv1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PagerDutyConfig. +func (in *PagerDutyConfig) DeepCopy() *PagerDutyConfig { + if in == nil { + return nil + } + out := new(PagerDutyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PagerDutyImageConfig) DeepCopyInto(out *PagerDutyImageConfig) { + *out = *in + if in.Src != nil { + in, out := &in.Src, &out.Src + *out = new(string) + **out = **in + } + if in.Href != nil { + in, out := &in.Href, &out.Href + *out = new(string) + **out = **in + } + if in.Alt != nil { + in, out := &in.Alt, &out.Alt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PagerDutyImageConfig. +func (in *PagerDutyImageConfig) DeepCopy() *PagerDutyImageConfig { + if in == nil { + return nil + } + out := new(PagerDutyImageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PagerDutyLinkConfig) DeepCopyInto(out *PagerDutyLinkConfig) { + *out = *in + if in.Href != nil { + in, out := &in.Href, &out.Href + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PagerDutyLinkConfig. +func (in *PagerDutyLinkConfig) DeepCopy() *PagerDutyLinkConfig { + if in == nil { + return nil + } + out := new(PagerDutyLinkConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PushoverConfig) DeepCopyInto(out *PushoverConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.UserKey != nil { + in, out := &in.UserKey, &out.UserKey + *out = new(SecretKeySelector) + **out = **in + } + if in.UserKeyFile != nil { + in, out := &in.UserKeyFile, &out.UserKeyFile + *out = new(string) + **out = **in + } + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = new(SecretKeySelector) + **out = **in + } + if in.TokenFile != nil { + in, out := &in.TokenFile, &out.TokenFile + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + if in.URLTitle != nil { + in, out := &in.URLTitle, &out.URLTitle + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Device != nil { + in, out := &in.Device, &out.Device + *out = new(string) + **out = **in + } + if in.Sound != nil { + in, out := &in.Sound, &out.Sound + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.Retry != nil { + in, out := &in.Retry, &out.Retry + *out = new(string) + **out = **in + } + if in.Expire != nil { + in, out := &in.Expire, &out.Expire + *out = new(string) + **out = **in + } + if in.HTML != nil { + in, out := &in.HTML, &out.HTML + *out = new(bool) + **out = **in + } + if in.Monospace != nil { + in, out := &in.Monospace, &out.Monospace + *out = new(bool) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushoverConfig. +func (in *PushoverConfig) DeepCopy() *PushoverConfig { + if in == nil { + return nil + } + out := new(PushoverConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Receiver) DeepCopyInto(out *Receiver) { + *out = *in + if in.OpsGenieConfigs != nil { + in, out := &in.OpsGenieConfigs, &out.OpsGenieConfigs + *out = make([]OpsGenieConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PagerDutyConfigs != nil { + in, out := &in.PagerDutyConfigs, &out.PagerDutyConfigs + *out = make([]PagerDutyConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DiscordConfigs != nil { + in, out := &in.DiscordConfigs, &out.DiscordConfigs + *out = make([]DiscordConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SlackConfigs != nil { + in, out := &in.SlackConfigs, &out.SlackConfigs + *out = make([]SlackConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WebhookConfigs != nil { + in, out := &in.WebhookConfigs, &out.WebhookConfigs + *out = make([]WebhookConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WeChatConfigs != nil { + in, out := &in.WeChatConfigs, &out.WeChatConfigs + *out = make([]WeChatConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EmailConfigs != nil { + in, out := &in.EmailConfigs, &out.EmailConfigs + *out = make([]EmailConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VictorOpsConfigs != nil { + in, out := &in.VictorOpsConfigs, &out.VictorOpsConfigs + *out = make([]VictorOpsConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PushoverConfigs != nil { + in, out := &in.PushoverConfigs, &out.PushoverConfigs + *out = make([]PushoverConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SNSConfigs != nil { + in, out := &in.SNSConfigs, &out.SNSConfigs + *out = make([]SNSConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TelegramConfigs != nil { + in, out := &in.TelegramConfigs, &out.TelegramConfigs + *out = make([]TelegramConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WebexConfigs != nil { + in, out := &in.WebexConfigs, &out.WebexConfigs + *out = make([]WebexConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MSTeamsConfigs != nil { + in, out := &in.MSTeamsConfigs, &out.MSTeamsConfigs + *out = make([]MSTeamsConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MSTeamsV2Configs != nil { + in, out := &in.MSTeamsV2Configs, &out.MSTeamsV2Configs + *out = make([]MSTeamsV2Config, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RocketChatConfigs != nil { + in, out := &in.RocketChatConfigs, &out.RocketChatConfigs + *out = make([]RocketChatConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Receiver. +func (in *Receiver) DeepCopy() *Receiver { + if in == nil { + return nil + } + out := new(Receiver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RocketChatActionConfig) DeepCopyInto(out *RocketChatActionConfig) { + *out = *in + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Msg != nil { + in, out := &in.Msg, &out.Msg + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RocketChatActionConfig. +func (in *RocketChatActionConfig) DeepCopy() *RocketChatActionConfig { + if in == nil { + return nil + } + out := new(RocketChatActionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RocketChatConfig) DeepCopyInto(out *RocketChatConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.Channel != nil { + in, out := &in.Channel, &out.Channel + *out = new(string) + **out = **in + } + in.Token.DeepCopyInto(&out.Token) + in.TokenID.DeepCopyInto(&out.TokenID) + if in.Color != nil { + in, out := &in.Color, &out.Color + *out = new(string) + **out = **in + } + if in.Emoji != nil { + in, out := &in.Emoji, &out.Emoji + *out = new(string) + **out = **in + } + if in.IconURL != nil { + in, out := &in.IconURL, &out.IconURL + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.TitleLink != nil { + in, out := &in.TitleLink, &out.TitleLink + *out = new(string) + **out = **in + } + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]RocketChatFieldConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShortFields != nil { + in, out := &in.ShortFields, &out.ShortFields + *out = new(bool) + **out = **in + } + if in.ImageURL != nil { + in, out := &in.ImageURL, &out.ImageURL + *out = new(string) + **out = **in + } + if in.ThumbURL != nil { + in, out := &in.ThumbURL, &out.ThumbURL + *out = new(string) + **out = **in + } + if in.LinkNames != nil { + in, out := &in.LinkNames, &out.LinkNames + *out = new(bool) + **out = **in + } + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]RocketChatActionConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RocketChatConfig. +func (in *RocketChatConfig) DeepCopy() *RocketChatConfig { + if in == nil { + return nil + } + out := new(RocketChatConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RocketChatFieldConfig) DeepCopyInto(out *RocketChatFieldConfig) { + *out = *in + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + if in.Short != nil { + in, out := &in.Short, &out.Short + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RocketChatFieldConfig. +func (in *RocketChatFieldConfig) DeepCopy() *RocketChatFieldConfig { + if in == nil { + return nil + } + out := new(RocketChatFieldConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Route) DeepCopyInto(out *Route) { + *out = *in + if in.GroupBy != nil { + in, out := &in.GroupBy, &out.GroupBy + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Matchers != nil { + in, out := &in.Matchers, &out.Matchers + *out = make([]Matcher, len(*in)) + copy(*out, *in) + } + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make([]v1.JSON, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MuteTimeIntervals != nil { + in, out := &in.MuteTimeIntervals, &out.MuteTimeIntervals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ActiveTimeIntervals != nil { + in, out := &in.ActiveTimeIntervals, &out.ActiveTimeIntervals + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route. +func (in *Route) DeepCopy() *Route { + if in == nil { + return nil + } + out := new(Route) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SNSConfig) DeepCopyInto(out *SNSConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.Sigv4 != nil { + in, out := &in.Sigv4, &out.Sigv4 + *out = new(monitoringv1.Sigv4) + (*in).DeepCopyInto(*out) + } + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SNSConfig. +func (in *SNSConfig) DeepCopy() *SNSConfig { + if in == nil { + return nil + } + out := new(SNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. +func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { + if in == nil { + return nil + } + out := new(SecretKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SlackAction) DeepCopyInto(out *SlackAction) { + *out = *in + if in.Style != nil { + in, out := &in.Style, &out.Style + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + if in.ConfirmField != nil { + in, out := &in.ConfirmField, &out.ConfirmField + *out = new(SlackConfirmationField) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackAction. +func (in *SlackAction) DeepCopy() *SlackAction { + if in == nil { + return nil + } + out := new(SlackAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SlackConfig) DeepCopyInto(out *SlackConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(SecretKeySelector) + **out = **in + } + if in.Channel != nil { + in, out := &in.Channel, &out.Channel + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.Color != nil { + in, out := &in.Color, &out.Color + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Pretext != nil { + in, out := &in.Pretext, &out.Pretext + *out = new(string) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]SlackField, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShortFields != nil { + in, out := &in.ShortFields, &out.ShortFields + *out = new(bool) + **out = **in + } + if in.Footer != nil { + in, out := &in.Footer, &out.Footer + *out = new(string) + **out = **in + } + if in.Fallback != nil { + in, out := &in.Fallback, &out.Fallback + *out = new(string) + **out = **in + } + if in.CallbackID != nil { + in, out := &in.CallbackID, &out.CallbackID + *out = new(string) + **out = **in + } + if in.IconEmoji != nil { + in, out := &in.IconEmoji, &out.IconEmoji + *out = new(string) + **out = **in + } + if in.LinkNames != nil { + in, out := &in.LinkNames, &out.LinkNames + *out = new(bool) + **out = **in + } + if in.MrkdwnIn != nil { + in, out := &in.MrkdwnIn, &out.MrkdwnIn + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]SlackAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(monitoringv1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackConfig. +func (in *SlackConfig) DeepCopy() *SlackConfig { + if in == nil { + return nil + } + out := new(SlackConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SlackConfirmationField) DeepCopyInto(out *SlackConfirmationField) { + *out = *in + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.OkText != nil { + in, out := &in.OkText, &out.OkText + *out = new(string) + **out = **in + } + if in.DismissText != nil { + in, out := &in.DismissText, &out.DismissText + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackConfirmationField. +func (in *SlackConfirmationField) DeepCopy() *SlackConfirmationField { + if in == nil { + return nil + } + out := new(SlackConfirmationField) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SlackField) DeepCopyInto(out *SlackField) { + *out = *in + if in.Short != nil { + in, out := &in.Short, &out.Short + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackField. +func (in *SlackField) DeepCopy() *SlackField { + if in == nil { + return nil + } + out := new(SlackField) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TelegramConfig) DeepCopyInto(out *TelegramConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.BotToken != nil { + in, out := &in.BotToken, &out.BotToken + *out = new(SecretKeySelector) + **out = **in + } + if in.BotTokenFile != nil { + in, out := &in.BotTokenFile, &out.BotTokenFile + *out = new(string) + **out = **in + } + if in.MessageThreadID != nil { + in, out := &in.MessageThreadID, &out.MessageThreadID + *out = new(int64) + **out = **in + } + if in.DisableNotifications != nil { + in, out := &in.DisableNotifications, &out.DisableNotifications + *out = new(bool) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelegramConfig. +func (in *TelegramConfig) DeepCopy() *TelegramConfig { + if in == nil { + return nil + } + out := new(TelegramConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeInterval) DeepCopyInto(out *TimeInterval) { + *out = *in + if in.TimeIntervals != nil { + in, out := &in.TimeIntervals, &out.TimeIntervals + *out = make([]TimePeriod, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeInterval. +func (in *TimeInterval) DeepCopy() *TimeInterval { + if in == nil { + return nil + } + out := new(TimeInterval) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimePeriod) DeepCopyInto(out *TimePeriod) { + *out = *in + if in.Times != nil { + in, out := &in.Times, &out.Times + *out = make([]TimeRange, len(*in)) + copy(*out, *in) + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]WeekdayRange, len(*in)) + copy(*out, *in) + } + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]DayOfMonthRange, len(*in)) + copy(*out, *in) + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]MonthRange, len(*in)) + copy(*out, *in) + } + if in.Years != nil { + in, out := &in.Years, &out.Years + *out = make([]YearRange, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimePeriod. +func (in *TimePeriod) DeepCopy() *TimePeriod { + if in == nil { + return nil + } + out := new(TimePeriod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeRange) DeepCopyInto(out *TimeRange) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeRange. +func (in *TimeRange) DeepCopy() *TimeRange { + if in == nil { + return nil + } + out := new(TimeRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VictorOpsConfig) DeepCopyInto(out *VictorOpsConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APIKey != nil { + in, out := &in.APIKey, &out.APIKey + *out = new(SecretKeySelector) + **out = **in + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.MessageType != nil { + in, out := &in.MessageType, &out.MessageType + *out = new(string) + **out = **in + } + if in.EntityDisplayName != nil { + in, out := &in.EntityDisplayName, &out.EntityDisplayName + *out = new(string) + **out = **in + } + if in.StateMessage != nil { + in, out := &in.StateMessage, &out.StateMessage + *out = new(string) + **out = **in + } + if in.MonitoringTool != nil { + in, out := &in.MonitoringTool, &out.MonitoringTool + *out = new(string) + **out = **in + } + if in.CustomFields != nil { + in, out := &in.CustomFields, &out.CustomFields + *out = make([]KeyValue, len(*in)) + copy(*out, *in) + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VictorOpsConfig. +func (in *VictorOpsConfig) DeepCopy() *VictorOpsConfig { + if in == nil { + return nil + } + out := new(VictorOpsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WeChatConfig) DeepCopyInto(out *WeChatConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APISecret != nil { + in, out := &in.APISecret, &out.APISecret + *out = new(SecretKeySelector) + **out = **in + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeChatConfig. +func (in *WeChatConfig) DeepCopy() *WeChatConfig { + if in == nil { + return nil + } + out := new(WeChatConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebexConfig) DeepCopyInto(out *WebexConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.APIURL != nil { + in, out := &in.APIURL, &out.APIURL + *out = new(URL) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebexConfig. +func (in *WebexConfig) DeepCopy() *WebexConfig { + if in == nil { + return nil + } + out := new(WebexConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookConfig) DeepCopyInto(out *WebhookConfig) { + *out = *in + if in.SendResolved != nil { + in, out := &in.SendResolved, &out.SendResolved + *out = new(bool) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.URLSecret != nil { + in, out := &in.URLSecret, &out.URLSecret + *out = new(SecretKeySelector) + **out = **in + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfig) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(monitoringv1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfig. +func (in *WebhookConfig) DeepCopy() *WebhookConfig { + if in == nil { + return nil + } + out := new(WebhookConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/LICENSE b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/LICENSE new file mode 100644 index 0000000000..74e6ec6963 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
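Editorial aside (not part of the diff): the zz_generated deepcopy methods vendored above exist so that callers can mutate a private copy of an object rather than one that may be shared, for example an object returned from an informer cache. A minimal sketch of that usage, assuming the types above come from the prometheus-operator monitoring v1beta1 package (the exact import path is not shown in this hunk):

package main

import (
	"fmt"

	// Assumed import path for the AlertmanagerConfig types whose generated
	// deepcopy functions are vendored above.
	monv1beta1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1"
)

func main() {
	original := &monv1beta1.Receiver{Name: "on-call"}

	// Copy before mutating so the original (possibly cache-shared) object is
	// left untouched; this is exactly what the generated DeepCopy provides.
	clone := original.DeepCopy()
	clone.Name = "on-call-staging"

	fmt.Println(original.Name, clone.Name) // prints: on-call on-call-staging
}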
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertingspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertingspec.go new file mode 100644 index 0000000000..b66f669678 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertingspec.go @@ -0,0 +1,42 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AlertingSpecApplyConfiguration represents a declarative configuration of the AlertingSpec type for use +// with apply. +type AlertingSpecApplyConfiguration struct { + Alertmanagers []AlertmanagerEndpointsApplyConfiguration `json:"alertmanagers,omitempty"` +} + +// AlertingSpecApplyConfiguration constructs a declarative configuration of the AlertingSpec type for use with +// apply. +func AlertingSpec() *AlertingSpecApplyConfiguration { + return &AlertingSpecApplyConfiguration{} +} + +// WithAlertmanagers adds the given value to the Alertmanagers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Alertmanagers field. +func (b *AlertingSpecApplyConfiguration) WithAlertmanagers(values ...*AlertmanagerEndpointsApplyConfiguration) *AlertingSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAlertmanagers") + } + b.Alertmanagers = append(b.Alertmanagers, *values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanager.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanager.go new file mode 100644 index 0000000000..bf74aa7411 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanager.go @@ -0,0 +1,240 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// AlertmanagerApplyConfiguration represents a declarative configuration of the Alertmanager type for use +// with apply. +type AlertmanagerApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *AlertmanagerSpecApplyConfiguration `json:"spec,omitempty"` + Status *AlertmanagerStatusApplyConfiguration `json:"status,omitempty"` +} + +// Alertmanager constructs a declarative configuration of the Alertmanager type for use with +// apply. +func Alertmanager(name, namespace string) *AlertmanagerApplyConfiguration { + b := &AlertmanagerApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("Alertmanager") + b.WithAPIVersion("monitoring.coreos.com/v1") + return b +} +func (b AlertmanagerApplyConfiguration) IsApplyConfiguration() {} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *AlertmanagerApplyConfiguration) WithKind(value string) *AlertmanagerApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *AlertmanagerApplyConfiguration) WithAPIVersion(value string) *AlertmanagerApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *AlertmanagerApplyConfiguration) WithName(value string) *AlertmanagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *AlertmanagerApplyConfiguration) WithGenerateName(value string) *AlertmanagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *AlertmanagerApplyConfiguration) WithNamespace(value string) *AlertmanagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *AlertmanagerApplyConfiguration) WithUID(value types.UID) *AlertmanagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *AlertmanagerApplyConfiguration) WithResourceVersion(value string) *AlertmanagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *AlertmanagerApplyConfiguration) WithGeneration(value int64) *AlertmanagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *AlertmanagerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *AlertmanagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *AlertmanagerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *AlertmanagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *AlertmanagerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *AlertmanagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *AlertmanagerApplyConfiguration) WithLabels(entries map[string]string) *AlertmanagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *AlertmanagerApplyConfiguration) WithAnnotations(entries map[string]string) *AlertmanagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *AlertmanagerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *AlertmanagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *AlertmanagerApplyConfiguration) WithFinalizers(values ...string) *AlertmanagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *AlertmanagerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *AlertmanagerApplyConfiguration) WithSpec(value *AlertmanagerSpecApplyConfiguration) *AlertmanagerApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *AlertmanagerApplyConfiguration) WithStatus(value *AlertmanagerStatusApplyConfiguration) *AlertmanagerApplyConfiguration { + b.Status = value + return b +} + +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *AlertmanagerApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *AlertmanagerApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *AlertmanagerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *AlertmanagerApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerconfigmatcherstrategy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerconfigmatcherstrategy.go new file mode 100644 index 0000000000..bd36fb7cac --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerconfigmatcherstrategy.go @@ -0,0 +1,41 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// AlertmanagerConfigMatcherStrategyApplyConfiguration represents a declarative configuration of the AlertmanagerConfigMatcherStrategy type for use +// with apply. +type AlertmanagerConfigMatcherStrategyApplyConfiguration struct { + Type *monitoringv1.AlertmanagerConfigMatcherStrategyType `json:"type,omitempty"` +} + +// AlertmanagerConfigMatcherStrategyApplyConfiguration constructs a declarative configuration of the AlertmanagerConfigMatcherStrategy type for use with +// apply. +func AlertmanagerConfigMatcherStrategy() *AlertmanagerConfigMatcherStrategyApplyConfiguration { + return &AlertmanagerConfigMatcherStrategyApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *AlertmanagerConfigMatcherStrategyApplyConfiguration) WithType(value monitoringv1.AlertmanagerConfigMatcherStrategyType) *AlertmanagerConfigMatcherStrategyApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerconfiguration.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerconfiguration.go new file mode 100644 index 0000000000..c6b866d1e1 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerconfiguration.go @@ -0,0 +1,60 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AlertmanagerConfigurationApplyConfiguration represents a declarative configuration of the AlertmanagerConfiguration type for use +// with apply. +type AlertmanagerConfigurationApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Global *AlertmanagerGlobalConfigApplyConfiguration `json:"global,omitempty"` + Templates []SecretOrConfigMapApplyConfiguration `json:"templates,omitempty"` +} + +// AlertmanagerConfigurationApplyConfiguration constructs a declarative configuration of the AlertmanagerConfiguration type for use with +// apply. +func AlertmanagerConfiguration() *AlertmanagerConfigurationApplyConfiguration { + return &AlertmanagerConfigurationApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *AlertmanagerConfigurationApplyConfiguration) WithName(value string) *AlertmanagerConfigurationApplyConfiguration { + b.Name = &value + return b +} + +// WithGlobal sets the Global field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Global field is set to the value of the last call. +func (b *AlertmanagerConfigurationApplyConfiguration) WithGlobal(value *AlertmanagerGlobalConfigApplyConfiguration) *AlertmanagerConfigurationApplyConfiguration { + b.Global = value + return b +} + +// WithTemplates adds the given value to the Templates field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Templates field. +func (b *AlertmanagerConfigurationApplyConfiguration) WithTemplates(values ...*SecretOrConfigMapApplyConfiguration) *AlertmanagerConfigurationApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithTemplates") + } + b.Templates = append(b.Templates, *values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerendpoints.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerendpoints.go new file mode 100644 index 0000000000..8743393c84 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerendpoints.go @@ -0,0 +1,218 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// AlertmanagerEndpointsApplyConfiguration represents a declarative configuration of the AlertmanagerEndpoints type for use +// with apply. 
+type AlertmanagerEndpointsApplyConfiguration struct { + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` + Port *intstr.IntOrString `json:"port,omitempty"` + Scheme *monitoringv1.Scheme `json:"scheme,omitempty"` + PathPrefix *string `json:"pathPrefix,omitempty"` + TLSConfig *TLSConfigApplyConfiguration `json:"tlsConfig,omitempty"` + BasicAuth *BasicAuthApplyConfiguration `json:"basicAuth,omitempty"` + BearerTokenFile *string `json:"bearerTokenFile,omitempty"` + Authorization *SafeAuthorizationApplyConfiguration `json:"authorization,omitempty"` + Sigv4 *Sigv4ApplyConfiguration `json:"sigv4,omitempty"` + ProxyConfigApplyConfiguration `json:",inline"` + APIVersion *monitoringv1.AlertmanagerAPIVersion `json:"apiVersion,omitempty"` + Timeout *monitoringv1.Duration `json:"timeout,omitempty"` + EnableHttp2 *bool `json:"enableHttp2,omitempty"` + RelabelConfigs []RelabelConfigApplyConfiguration `json:"relabelings,omitempty"` + AlertRelabelConfigs []RelabelConfigApplyConfiguration `json:"alertRelabelings,omitempty"` +} + +// AlertmanagerEndpointsApplyConfiguration constructs a declarative configuration of the AlertmanagerEndpoints type for use with +// apply. +func AlertmanagerEndpoints() *AlertmanagerEndpointsApplyConfiguration { + return &AlertmanagerEndpointsApplyConfiguration{} +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithNamespace(value string) *AlertmanagerEndpointsApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithName(value string) *AlertmanagerEndpointsApplyConfiguration { + b.Name = &value + return b +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithPort(value intstr.IntOrString) *AlertmanagerEndpointsApplyConfiguration { + b.Port = &value + return b +} + +// WithScheme sets the Scheme field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scheme field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithScheme(value monitoringv1.Scheme) *AlertmanagerEndpointsApplyConfiguration { + b.Scheme = &value + return b +} + +// WithPathPrefix sets the PathPrefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PathPrefix field is set to the value of the last call. 
+func (b *AlertmanagerEndpointsApplyConfiguration) WithPathPrefix(value string) *AlertmanagerEndpointsApplyConfiguration { + b.PathPrefix = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithTLSConfig(value *TLSConfigApplyConfiguration) *AlertmanagerEndpointsApplyConfiguration { + b.TLSConfig = value + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithBasicAuth(value *BasicAuthApplyConfiguration) *AlertmanagerEndpointsApplyConfiguration { + b.BasicAuth = value + return b +} + +// WithBearerTokenFile sets the BearerTokenFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerTokenFile field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithBearerTokenFile(value string) *AlertmanagerEndpointsApplyConfiguration { + b.BearerTokenFile = &value + return b +} + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithAuthorization(value *SafeAuthorizationApplyConfiguration) *AlertmanagerEndpointsApplyConfiguration { + b.Authorization = value + return b +} + +// WithSigv4 sets the Sigv4 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Sigv4 field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithSigv4(value *Sigv4ApplyConfiguration) *AlertmanagerEndpointsApplyConfiguration { + b.Sigv4 = value + return b +} + +// WithProxyURL sets the ProxyURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyURL field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithProxyURL(value string) *AlertmanagerEndpointsApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyURL = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. 
+func (b *AlertmanagerEndpointsApplyConfiguration) WithNoProxy(value string) *AlertmanagerEndpointsApplyConfiguration { + b.ProxyConfigApplyConfiguration.NoProxy = &value + return b +} + +// WithProxyFromEnvironment sets the ProxyFromEnvironment field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyFromEnvironment field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithProxyFromEnvironment(value bool) *AlertmanagerEndpointsApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyFromEnvironment = &value + return b +} + +// WithProxyConnectHeader puts the entries into the ProxyConnectHeader field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ProxyConnectHeader field, +// overwriting an existing map entries in ProxyConnectHeader field with the same key. +func (b *AlertmanagerEndpointsApplyConfiguration) WithProxyConnectHeader(entries map[string][]corev1.SecretKeySelector) *AlertmanagerEndpointsApplyConfiguration { + if b.ProxyConfigApplyConfiguration.ProxyConnectHeader == nil && len(entries) > 0 { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader = make(map[string][]corev1.SecretKeySelector, len(entries)) + } + for k, v := range entries { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader[k] = v + } + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithAPIVersion(value monitoringv1.AlertmanagerAPIVersion) *AlertmanagerEndpointsApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithTimeout sets the Timeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Timeout field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithTimeout(value monitoringv1.Duration) *AlertmanagerEndpointsApplyConfiguration { + b.Timeout = &value + return b +} + +// WithEnableHttp2 sets the EnableHttp2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableHttp2 field is set to the value of the last call. +func (b *AlertmanagerEndpointsApplyConfiguration) WithEnableHttp2(value bool) *AlertmanagerEndpointsApplyConfiguration { + b.EnableHttp2 = &value + return b +} + +// WithRelabelConfigs adds the given value to the RelabelConfigs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RelabelConfigs field. 
+func (b *AlertmanagerEndpointsApplyConfiguration) WithRelabelConfigs(values ...*RelabelConfigApplyConfiguration) *AlertmanagerEndpointsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRelabelConfigs") + } + b.RelabelConfigs = append(b.RelabelConfigs, *values[i]) + } + return b +} + +// WithAlertRelabelConfigs adds the given value to the AlertRelabelConfigs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AlertRelabelConfigs field. +func (b *AlertmanagerEndpointsApplyConfiguration) WithAlertRelabelConfigs(values ...*RelabelConfigApplyConfiguration) *AlertmanagerEndpointsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAlertRelabelConfigs") + } + b.AlertRelabelConfigs = append(b.AlertRelabelConfigs, *values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerglobalconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerglobalconfig.go new file mode 100644 index 0000000000..47a3582cfa --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerglobalconfig.go @@ -0,0 +1,150 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" +) + +// AlertmanagerGlobalConfigApplyConfiguration represents a declarative configuration of the AlertmanagerGlobalConfig type for use +// with apply. 
+type AlertmanagerGlobalConfigApplyConfiguration struct { + SMTPConfig *GlobalSMTPConfigApplyConfiguration `json:"smtp,omitempty"` + ResolveTimeout *monitoringv1.Duration `json:"resolveTimeout,omitempty"` + HTTPConfigWithProxy *HTTPConfigWithProxyApplyConfiguration `json:"httpConfig,omitempty"` + SlackAPIURL *corev1.SecretKeySelector `json:"slackApiUrl,omitempty"` + OpsGenieAPIURL *corev1.SecretKeySelector `json:"opsGenieApiUrl,omitempty"` + OpsGenieAPIKey *corev1.SecretKeySelector `json:"opsGenieApiKey,omitempty"` + PagerdutyURL *monitoringv1.URL `json:"pagerdutyUrl,omitempty"` + TelegramConfig *GlobalTelegramConfigApplyConfiguration `json:"telegram,omitempty"` + JiraConfig *GlobalJiraConfigApplyConfiguration `json:"jira,omitempty"` + VictorOpsConfig *GlobalVictorOpsConfigApplyConfiguration `json:"victorops,omitempty"` + RocketChatConfig *GlobalRocketChatConfigApplyConfiguration `json:"rocketChat,omitempty"` + WebexConfig *GlobalWebexConfigApplyConfiguration `json:"webex,omitempty"` + WeChatConfig *GlobalWeChatConfigApplyConfiguration `json:"wechat,omitempty"` +} + +// AlertmanagerGlobalConfigApplyConfiguration constructs a declarative configuration of the AlertmanagerGlobalConfig type for use with +// apply. +func AlertmanagerGlobalConfig() *AlertmanagerGlobalConfigApplyConfiguration { + return &AlertmanagerGlobalConfigApplyConfiguration{} +} + +// WithSMTPConfig sets the SMTPConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SMTPConfig field is set to the value of the last call. +func (b *AlertmanagerGlobalConfigApplyConfiguration) WithSMTPConfig(value *GlobalSMTPConfigApplyConfiguration) *AlertmanagerGlobalConfigApplyConfiguration { + b.SMTPConfig = value + return b +} + +// WithResolveTimeout sets the ResolveTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResolveTimeout field is set to the value of the last call. +func (b *AlertmanagerGlobalConfigApplyConfiguration) WithResolveTimeout(value monitoringv1.Duration) *AlertmanagerGlobalConfigApplyConfiguration { + b.ResolveTimeout = &value + return b +} + +// WithHTTPConfigWithProxy sets the HTTPConfigWithProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPConfigWithProxy field is set to the value of the last call. +func (b *AlertmanagerGlobalConfigApplyConfiguration) WithHTTPConfigWithProxy(value *HTTPConfigWithProxyApplyConfiguration) *AlertmanagerGlobalConfigApplyConfiguration { + b.HTTPConfigWithProxy = value + return b +} + +// WithSlackAPIURL sets the SlackAPIURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SlackAPIURL field is set to the value of the last call. 
+func (b *AlertmanagerGlobalConfigApplyConfiguration) WithSlackAPIURL(value corev1.SecretKeySelector) *AlertmanagerGlobalConfigApplyConfiguration { + b.SlackAPIURL = &value + return b +} + +// WithOpsGenieAPIURL sets the OpsGenieAPIURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OpsGenieAPIURL field is set to the value of the last call. +func (b *AlertmanagerGlobalConfigApplyConfiguration) WithOpsGenieAPIURL(value corev1.SecretKeySelector) *AlertmanagerGlobalConfigApplyConfiguration { + b.OpsGenieAPIURL = &value + return b +} + +// WithOpsGenieAPIKey sets the OpsGenieAPIKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OpsGenieAPIKey field is set to the value of the last call. +func (b *AlertmanagerGlobalConfigApplyConfiguration) WithOpsGenieAPIKey(value corev1.SecretKeySelector) *AlertmanagerGlobalConfigApplyConfiguration { + b.OpsGenieAPIKey = &value + return b +} + +// WithPagerdutyURL sets the PagerdutyURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PagerdutyURL field is set to the value of the last call. +func (b *AlertmanagerGlobalConfigApplyConfiguration) WithPagerdutyURL(value monitoringv1.URL) *AlertmanagerGlobalConfigApplyConfiguration { + b.PagerdutyURL = &value + return b +} + +// WithTelegramConfig sets the TelegramConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TelegramConfig field is set to the value of the last call. +func (b *AlertmanagerGlobalConfigApplyConfiguration) WithTelegramConfig(value *GlobalTelegramConfigApplyConfiguration) *AlertmanagerGlobalConfigApplyConfiguration { + b.TelegramConfig = value + return b +} + +// WithJiraConfig sets the JiraConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the JiraConfig field is set to the value of the last call. +func (b *AlertmanagerGlobalConfigApplyConfiguration) WithJiraConfig(value *GlobalJiraConfigApplyConfiguration) *AlertmanagerGlobalConfigApplyConfiguration { + b.JiraConfig = value + return b +} + +// WithVictorOpsConfig sets the VictorOpsConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VictorOpsConfig field is set to the value of the last call. +func (b *AlertmanagerGlobalConfigApplyConfiguration) WithVictorOpsConfig(value *GlobalVictorOpsConfigApplyConfiguration) *AlertmanagerGlobalConfigApplyConfiguration { + b.VictorOpsConfig = value + return b +} + +// WithRocketChatConfig sets the RocketChatConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RocketChatConfig field is set to the value of the last call. 
+func (b *AlertmanagerGlobalConfigApplyConfiguration) WithRocketChatConfig(value *GlobalRocketChatConfigApplyConfiguration) *AlertmanagerGlobalConfigApplyConfiguration { + b.RocketChatConfig = value + return b +} + +// WithWebexConfig sets the WebexConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WebexConfig field is set to the value of the last call. +func (b *AlertmanagerGlobalConfigApplyConfiguration) WithWebexConfig(value *GlobalWebexConfigApplyConfiguration) *AlertmanagerGlobalConfigApplyConfiguration { + b.WebexConfig = value + return b +} + +// WithWeChatConfig sets the WeChatConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WeChatConfig field is set to the value of the last call. +func (b *AlertmanagerGlobalConfigApplyConfiguration) WithWeChatConfig(value *GlobalWeChatConfigApplyConfiguration) *AlertmanagerGlobalConfigApplyConfiguration { + b.WeChatConfig = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerlimitsspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerlimitsspec.go new file mode 100644 index 0000000000..f452577858 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerlimitsspec.go @@ -0,0 +1,50 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// AlertmanagerLimitsSpecApplyConfiguration represents a declarative configuration of the AlertmanagerLimitsSpec type for use +// with apply. +type AlertmanagerLimitsSpecApplyConfiguration struct { + MaxSilences *int32 `json:"maxSilences,omitempty"` + MaxPerSilenceBytes *monitoringv1.ByteSize `json:"maxPerSilenceBytes,omitempty"` +} + +// AlertmanagerLimitsSpecApplyConfiguration constructs a declarative configuration of the AlertmanagerLimitsSpec type for use with +// apply. +func AlertmanagerLimitsSpec() *AlertmanagerLimitsSpecApplyConfiguration { + return &AlertmanagerLimitsSpecApplyConfiguration{} +} + +// WithMaxSilences sets the MaxSilences field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxSilences field is set to the value of the last call. 
+func (b *AlertmanagerLimitsSpecApplyConfiguration) WithMaxSilences(value int32) *AlertmanagerLimitsSpecApplyConfiguration { + b.MaxSilences = &value + return b +} + +// WithMaxPerSilenceBytes sets the MaxPerSilenceBytes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxPerSilenceBytes field is set to the value of the last call. +func (b *AlertmanagerLimitsSpecApplyConfiguration) WithMaxPerSilenceBytes(value monitoringv1.ByteSize) *AlertmanagerLimitsSpecApplyConfiguration { + b.MaxPerSilenceBytes = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerspec.go new file mode 100644 index 0000000000..89f7e44738 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerspec.go @@ -0,0 +1,622 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// AlertmanagerSpecApplyConfiguration represents a declarative configuration of the AlertmanagerSpec type for use +// with apply. 
+type AlertmanagerSpecApplyConfiguration struct { + PodMetadata *EmbeddedObjectMetadataApplyConfiguration `json:"podMetadata,omitempty"` + Image *string `json:"image,omitempty"` + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + Version *string `json:"version,omitempty"` + Tag *string `json:"tag,omitempty"` + SHA *string `json:"sha,omitempty"` + BaseImage *string `json:"baseImage,omitempty"` + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + Secrets []string `json:"secrets,omitempty"` + ConfigMaps []string `json:"configMaps,omitempty"` + ConfigSecret *string `json:"configSecret,omitempty"` + LogLevel *string `json:"logLevel,omitempty"` + LogFormat *string `json:"logFormat,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + Retention *monitoringv1.GoDuration `json:"retention,omitempty"` + Storage *StorageSpecApplyConfiguration `json:"storage,omitempty"` + Volumes []corev1.Volume `json:"volumes,omitempty"` + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` + PersistentVolumeClaimRetentionPolicy *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + ExternalURL *string `json:"externalUrl,omitempty"` + RoutePrefix *string `json:"routePrefix,omitempty"` + Paused *bool `json:"paused,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + Affinity *corev1.Affinity `json:"affinity,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + DNSPolicy *monitoringv1.DNSPolicy `json:"dnsPolicy,omitempty"` + DNSConfig *PodDNSConfigApplyConfiguration `json:"dnsConfig,omitempty"` + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` + ServiceName *string `json:"serviceName,omitempty"` + ServiceAccountName *string `json:"serviceAccountName,omitempty"` + ListenLocal *bool `json:"listenLocal,omitempty"` + PodManagementPolicy *monitoringv1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` + Containers []corev1.Container `json:"containers,omitempty"` + InitContainers []corev1.Container `json:"initContainers,omitempty"` + PriorityClassName *string `json:"priorityClassName,omitempty"` + AdditionalPeers []string `json:"additionalPeers,omitempty"` + ClusterAdvertiseAddress *string `json:"clusterAdvertiseAddress,omitempty"` + ClusterGossipInterval *monitoringv1.GoDuration `json:"clusterGossipInterval,omitempty"` + ClusterLabel *string `json:"clusterLabel,omitempty"` + ClusterPushpullInterval *monitoringv1.GoDuration `json:"clusterPushpullInterval,omitempty"` + ClusterPeerTimeout *monitoringv1.GoDuration `json:"clusterPeerTimeout,omitempty"` + PortName *string `json:"portName,omitempty"` + ForceEnableClusterMode *bool `json:"forceEnableClusterMode,omitempty"` + AlertmanagerConfigSelector *metav1.LabelSelectorApplyConfiguration `json:"alertmanagerConfigSelector,omitempty"` + AlertmanagerConfigNamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"alertmanagerConfigNamespaceSelector,omitempty"` + AlertmanagerConfigMatcherStrategy *AlertmanagerConfigMatcherStrategyApplyConfiguration `json:"alertmanagerConfigMatcherStrategy,omitempty"` + MinReadySeconds 
*int32 `json:"minReadySeconds,omitempty"` + HostAliases []HostAliasApplyConfiguration `json:"hostAliases,omitempty"` + Web *AlertmanagerWebSpecApplyConfiguration `json:"web,omitempty"` + Limits *AlertmanagerLimitsSpecApplyConfiguration `json:"limits,omitempty"` + ClusterTLS *ClusterTLSConfigApplyConfiguration `json:"clusterTLS,omitempty"` + AlertmanagerConfiguration *AlertmanagerConfigurationApplyConfiguration `json:"alertmanagerConfiguration,omitempty"` + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` + EnableFeatures []string `json:"enableFeatures,omitempty"` + AdditionalArgs []ArgumentApplyConfiguration `json:"additionalArgs,omitempty"` + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + HostUsers *bool `json:"hostUsers,omitempty"` +} + +// AlertmanagerSpecApplyConfiguration constructs a declarative configuration of the AlertmanagerSpec type for use with +// apply. +func AlertmanagerSpec() *AlertmanagerSpecApplyConfiguration { + return &AlertmanagerSpecApplyConfiguration{} +} + +// WithPodMetadata sets the PodMetadata field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodMetadata field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithPodMetadata(value *EmbeddedObjectMetadataApplyConfiguration) *AlertmanagerSpecApplyConfiguration { + b.PodMetadata = value + return b +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithImage(value string) *AlertmanagerSpecApplyConfiguration { + b.Image = &value + return b +} + +// WithImagePullPolicy sets the ImagePullPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImagePullPolicy field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithImagePullPolicy(value corev1.PullPolicy) *AlertmanagerSpecApplyConfiguration { + b.ImagePullPolicy = &value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithVersion(value string) *AlertmanagerSpecApplyConfiguration { + b.Version = &value + return b +} + +// WithTag sets the Tag field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Tag field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithTag(value string) *AlertmanagerSpecApplyConfiguration { + b.Tag = &value + return b +} + +// WithSHA sets the SHA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SHA field is set to the value of the last call. 
+func (b *AlertmanagerSpecApplyConfiguration) WithSHA(value string) *AlertmanagerSpecApplyConfiguration { + b.SHA = &value + return b +} + +// WithBaseImage sets the BaseImage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BaseImage field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithBaseImage(value string) *AlertmanagerSpecApplyConfiguration { + b.BaseImage = &value + return b +} + +// WithImagePullSecrets adds the given value to the ImagePullSecrets field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ImagePullSecrets field. +func (b *AlertmanagerSpecApplyConfiguration) WithImagePullSecrets(values ...corev1.LocalObjectReference) *AlertmanagerSpecApplyConfiguration { + for i := range values { + b.ImagePullSecrets = append(b.ImagePullSecrets, values[i]) + } + return b +} + +// WithSecrets adds the given value to the Secrets field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Secrets field. +func (b *AlertmanagerSpecApplyConfiguration) WithSecrets(values ...string) *AlertmanagerSpecApplyConfiguration { + for i := range values { + b.Secrets = append(b.Secrets, values[i]) + } + return b +} + +// WithConfigMaps adds the given value to the ConfigMaps field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ConfigMaps field. +func (b *AlertmanagerSpecApplyConfiguration) WithConfigMaps(values ...string) *AlertmanagerSpecApplyConfiguration { + for i := range values { + b.ConfigMaps = append(b.ConfigMaps, values[i]) + } + return b +} + +// WithConfigSecret sets the ConfigSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConfigSecret field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithConfigSecret(value string) *AlertmanagerSpecApplyConfiguration { + b.ConfigSecret = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithLogLevel(value string) *AlertmanagerSpecApplyConfiguration { + b.LogLevel = &value + return b +} + +// WithLogFormat sets the LogFormat field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogFormat field is set to the value of the last call. 
+func (b *AlertmanagerSpecApplyConfiguration) WithLogFormat(value string) *AlertmanagerSpecApplyConfiguration { + b.LogFormat = &value + return b +} + +// WithReplicas sets the Replicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Replicas field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithReplicas(value int32) *AlertmanagerSpecApplyConfiguration { + b.Replicas = &value + return b +} + +// WithRetention sets the Retention field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Retention field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithRetention(value monitoringv1.GoDuration) *AlertmanagerSpecApplyConfiguration { + b.Retention = &value + return b +} + +// WithStorage sets the Storage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Storage field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithStorage(value *StorageSpecApplyConfiguration) *AlertmanagerSpecApplyConfiguration { + b.Storage = value + return b +} + +// WithVolumes adds the given value to the Volumes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Volumes field. +func (b *AlertmanagerSpecApplyConfiguration) WithVolumes(values ...corev1.Volume) *AlertmanagerSpecApplyConfiguration { + for i := range values { + b.Volumes = append(b.Volumes, values[i]) + } + return b +} + +// WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the VolumeMounts field. +func (b *AlertmanagerSpecApplyConfiguration) WithVolumeMounts(values ...corev1.VolumeMount) *AlertmanagerSpecApplyConfiguration { + for i := range values { + b.VolumeMounts = append(b.VolumeMounts, values[i]) + } + return b +} + +// WithPersistentVolumeClaimRetentionPolicy sets the PersistentVolumeClaimRetentionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PersistentVolumeClaimRetentionPolicy field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPolicy(value appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy) *AlertmanagerSpecApplyConfiguration { + b.PersistentVolumeClaimRetentionPolicy = &value + return b +} + +// WithExternalURL sets the ExternalURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExternalURL field is set to the value of the last call. 
+func (b *AlertmanagerSpecApplyConfiguration) WithExternalURL(value string) *AlertmanagerSpecApplyConfiguration { + b.ExternalURL = &value + return b +} + +// WithRoutePrefix sets the RoutePrefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RoutePrefix field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithRoutePrefix(value string) *AlertmanagerSpecApplyConfiguration { + b.RoutePrefix = &value + return b +} + +// WithPaused sets the Paused field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Paused field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithPaused(value bool) *AlertmanagerSpecApplyConfiguration { + b.Paused = &value + return b +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. +func (b *AlertmanagerSpecApplyConfiguration) WithNodeSelector(entries map[string]string) *AlertmanagerSpecApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithResources(value corev1.ResourceRequirements) *AlertmanagerSpecApplyConfiguration { + b.Resources = &value + return b +} + +// WithAffinity sets the Affinity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Affinity field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithAffinity(value corev1.Affinity) *AlertmanagerSpecApplyConfiguration { + b.Affinity = &value + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *AlertmanagerSpecApplyConfiguration) WithTolerations(values ...corev1.Toleration) *AlertmanagerSpecApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} + +// WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field. 
+func (b *AlertmanagerSpecApplyConfiguration) WithTopologySpreadConstraints(values ...corev1.TopologySpreadConstraint) *AlertmanagerSpecApplyConfiguration { + for i := range values { + b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, values[i]) + } + return b +} + +// WithSecurityContext sets the SecurityContext field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SecurityContext field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithSecurityContext(value corev1.PodSecurityContext) *AlertmanagerSpecApplyConfiguration { + b.SecurityContext = &value + return b +} + +// WithDNSPolicy sets the DNSPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSPolicy field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithDNSPolicy(value monitoringv1.DNSPolicy) *AlertmanagerSpecApplyConfiguration { + b.DNSPolicy = &value + return b +} + +// WithDNSConfig sets the DNSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSConfig field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithDNSConfig(value *PodDNSConfigApplyConfiguration) *AlertmanagerSpecApplyConfiguration { + b.DNSConfig = value + return b +} + +// WithEnableServiceLinks sets the EnableServiceLinks field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableServiceLinks field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithEnableServiceLinks(value bool) *AlertmanagerSpecApplyConfiguration { + b.EnableServiceLinks = &value + return b +} + +// WithServiceName sets the ServiceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceName field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithServiceName(value string) *AlertmanagerSpecApplyConfiguration { + b.ServiceName = &value + return b +} + +// WithServiceAccountName sets the ServiceAccountName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceAccountName field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithServiceAccountName(value string) *AlertmanagerSpecApplyConfiguration { + b.ServiceAccountName = &value + return b +} + +// WithListenLocal sets the ListenLocal field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ListenLocal field is set to the value of the last call. 
+func (b *AlertmanagerSpecApplyConfiguration) WithListenLocal(value bool) *AlertmanagerSpecApplyConfiguration { + b.ListenLocal = &value + return b +} + +// WithPodManagementPolicy sets the PodManagementPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodManagementPolicy field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithPodManagementPolicy(value monitoringv1.PodManagementPolicyType) *AlertmanagerSpecApplyConfiguration { + b.PodManagementPolicy = &value + return b +} + +// WithUpdateStrategy sets the UpdateStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UpdateStrategy field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithUpdateStrategy(value *StatefulSetUpdateStrategyApplyConfiguration) *AlertmanagerSpecApplyConfiguration { + b.UpdateStrategy = value + return b +} + +// WithContainers adds the given value to the Containers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Containers field. +func (b *AlertmanagerSpecApplyConfiguration) WithContainers(values ...corev1.Container) *AlertmanagerSpecApplyConfiguration { + for i := range values { + b.Containers = append(b.Containers, values[i]) + } + return b +} + +// WithInitContainers adds the given value to the InitContainers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the InitContainers field. +func (b *AlertmanagerSpecApplyConfiguration) WithInitContainers(values ...corev1.Container) *AlertmanagerSpecApplyConfiguration { + for i := range values { + b.InitContainers = append(b.InitContainers, values[i]) + } + return b +} + +// WithPriorityClassName sets the PriorityClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PriorityClassName field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithPriorityClassName(value string) *AlertmanagerSpecApplyConfiguration { + b.PriorityClassName = &value + return b +} + +// WithAdditionalPeers adds the given value to the AdditionalPeers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AdditionalPeers field. +func (b *AlertmanagerSpecApplyConfiguration) WithAdditionalPeers(values ...string) *AlertmanagerSpecApplyConfiguration { + for i := range values { + b.AdditionalPeers = append(b.AdditionalPeers, values[i]) + } + return b +} + +// WithClusterAdvertiseAddress sets the ClusterAdvertiseAddress field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ClusterAdvertiseAddress field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithClusterAdvertiseAddress(value string) *AlertmanagerSpecApplyConfiguration { + b.ClusterAdvertiseAddress = &value + return b +} + +// WithClusterGossipInterval sets the ClusterGossipInterval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterGossipInterval field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithClusterGossipInterval(value monitoringv1.GoDuration) *AlertmanagerSpecApplyConfiguration { + b.ClusterGossipInterval = &value + return b +} + +// WithClusterLabel sets the ClusterLabel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterLabel field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithClusterLabel(value string) *AlertmanagerSpecApplyConfiguration { + b.ClusterLabel = &value + return b +} + +// WithClusterPushpullInterval sets the ClusterPushpullInterval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterPushpullInterval field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithClusterPushpullInterval(value monitoringv1.GoDuration) *AlertmanagerSpecApplyConfiguration { + b.ClusterPushpullInterval = &value + return b +} + +// WithClusterPeerTimeout sets the ClusterPeerTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterPeerTimeout field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithClusterPeerTimeout(value monitoringv1.GoDuration) *AlertmanagerSpecApplyConfiguration { + b.ClusterPeerTimeout = &value + return b +} + +// WithPortName sets the PortName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PortName field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithPortName(value string) *AlertmanagerSpecApplyConfiguration { + b.PortName = &value + return b +} + +// WithForceEnableClusterMode sets the ForceEnableClusterMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForceEnableClusterMode field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithForceEnableClusterMode(value bool) *AlertmanagerSpecApplyConfiguration { + b.ForceEnableClusterMode = &value + return b +} + +// WithAlertmanagerConfigSelector sets the AlertmanagerConfigSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AlertmanagerConfigSelector field is set to the value of the last call. 
+func (b *AlertmanagerSpecApplyConfiguration) WithAlertmanagerConfigSelector(value *metav1.LabelSelectorApplyConfiguration) *AlertmanagerSpecApplyConfiguration { + b.AlertmanagerConfigSelector = value + return b +} + +// WithAlertmanagerConfigNamespaceSelector sets the AlertmanagerConfigNamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AlertmanagerConfigNamespaceSelector field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithAlertmanagerConfigNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *AlertmanagerSpecApplyConfiguration { + b.AlertmanagerConfigNamespaceSelector = value + return b +} + +// WithAlertmanagerConfigMatcherStrategy sets the AlertmanagerConfigMatcherStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AlertmanagerConfigMatcherStrategy field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithAlertmanagerConfigMatcherStrategy(value *AlertmanagerConfigMatcherStrategyApplyConfiguration) *AlertmanagerSpecApplyConfiguration { + b.AlertmanagerConfigMatcherStrategy = value + return b +} + +// WithMinReadySeconds sets the MinReadySeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinReadySeconds field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithMinReadySeconds(value int32) *AlertmanagerSpecApplyConfiguration { + b.MinReadySeconds = &value + return b +} + +// WithHostAliases adds the given value to the HostAliases field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the HostAliases field. +func (b *AlertmanagerSpecApplyConfiguration) WithHostAliases(values ...*HostAliasApplyConfiguration) *AlertmanagerSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithHostAliases") + } + b.HostAliases = append(b.HostAliases, *values[i]) + } + return b +} + +// WithWeb sets the Web field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Web field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithWeb(value *AlertmanagerWebSpecApplyConfiguration) *AlertmanagerSpecApplyConfiguration { + b.Web = value + return b +} + +// WithLimits sets the Limits field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Limits field is set to the value of the last call. 
+func (b *AlertmanagerSpecApplyConfiguration) WithLimits(value *AlertmanagerLimitsSpecApplyConfiguration) *AlertmanagerSpecApplyConfiguration { + b.Limits = value + return b +} + +// WithClusterTLS sets the ClusterTLS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterTLS field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithClusterTLS(value *ClusterTLSConfigApplyConfiguration) *AlertmanagerSpecApplyConfiguration { + b.ClusterTLS = value + return b +} + +// WithAlertmanagerConfiguration sets the AlertmanagerConfiguration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AlertmanagerConfiguration field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithAlertmanagerConfiguration(value *AlertmanagerConfigurationApplyConfiguration) *AlertmanagerSpecApplyConfiguration { + b.AlertmanagerConfiguration = value + return b +} + +// WithAutomountServiceAccountToken sets the AutomountServiceAccountToken field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AutomountServiceAccountToken field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithAutomountServiceAccountToken(value bool) *AlertmanagerSpecApplyConfiguration { + b.AutomountServiceAccountToken = &value + return b +} + +// WithEnableFeatures adds the given value to the EnableFeatures field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the EnableFeatures field. +func (b *AlertmanagerSpecApplyConfiguration) WithEnableFeatures(values ...string) *AlertmanagerSpecApplyConfiguration { + for i := range values { + b.EnableFeatures = append(b.EnableFeatures, values[i]) + } + return b +} + +// WithAdditionalArgs adds the given value to the AdditionalArgs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AdditionalArgs field. +func (b *AlertmanagerSpecApplyConfiguration) WithAdditionalArgs(values ...*ArgumentApplyConfiguration) *AlertmanagerSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAdditionalArgs") + } + b.AdditionalArgs = append(b.AdditionalArgs, *values[i]) + } + return b +} + +// WithTerminationGracePeriodSeconds sets the TerminationGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TerminationGracePeriodSeconds field is set to the value of the last call. 
+func (b *AlertmanagerSpecApplyConfiguration) WithTerminationGracePeriodSeconds(value int64) *AlertmanagerSpecApplyConfiguration { + b.TerminationGracePeriodSeconds = &value + return b +} + +// WithHostUsers sets the HostUsers field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostUsers field is set to the value of the last call. +func (b *AlertmanagerSpecApplyConfiguration) WithHostUsers(value bool) *AlertmanagerSpecApplyConfiguration { + b.HostUsers = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerstatus.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerstatus.go new file mode 100644 index 0000000000..f459945624 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerstatus.go @@ -0,0 +1,96 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AlertmanagerStatusApplyConfiguration represents a declarative configuration of the AlertmanagerStatus type for use +// with apply. +type AlertmanagerStatusApplyConfiguration struct { + Paused *bool `json:"paused,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"` + AvailableReplicas *int32 `json:"availableReplicas,omitempty"` + UnavailableReplicas *int32 `json:"unavailableReplicas,omitempty"` + Selector *string `json:"selector,omitempty"` + Conditions []ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// AlertmanagerStatusApplyConfiguration constructs a declarative configuration of the AlertmanagerStatus type for use with +// apply. +func AlertmanagerStatus() *AlertmanagerStatusApplyConfiguration { + return &AlertmanagerStatusApplyConfiguration{} +} + +// WithPaused sets the Paused field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Paused field is set to the value of the last call. +func (b *AlertmanagerStatusApplyConfiguration) WithPaused(value bool) *AlertmanagerStatusApplyConfiguration { + b.Paused = &value + return b +} + +// WithReplicas sets the Replicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Replicas field is set to the value of the last call. 
+func (b *AlertmanagerStatusApplyConfiguration) WithReplicas(value int32) *AlertmanagerStatusApplyConfiguration { + b.Replicas = &value + return b +} + +// WithUpdatedReplicas sets the UpdatedReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UpdatedReplicas field is set to the value of the last call. +func (b *AlertmanagerStatusApplyConfiguration) WithUpdatedReplicas(value int32) *AlertmanagerStatusApplyConfiguration { + b.UpdatedReplicas = &value + return b +} + +// WithAvailableReplicas sets the AvailableReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AvailableReplicas field is set to the value of the last call. +func (b *AlertmanagerStatusApplyConfiguration) WithAvailableReplicas(value int32) *AlertmanagerStatusApplyConfiguration { + b.AvailableReplicas = &value + return b +} + +// WithUnavailableReplicas sets the UnavailableReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnavailableReplicas field is set to the value of the last call. +func (b *AlertmanagerStatusApplyConfiguration) WithUnavailableReplicas(value int32) *AlertmanagerStatusApplyConfiguration { + b.UnavailableReplicas = &value + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. +func (b *AlertmanagerStatusApplyConfiguration) WithSelector(value string) *AlertmanagerStatusApplyConfiguration { + b.Selector = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *AlertmanagerStatusApplyConfiguration) WithConditions(values ...*ConditionApplyConfiguration) *AlertmanagerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerwebspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerwebspec.go new file mode 100644 index 0000000000..843ac72447 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/alertmanagerwebspec.go @@ -0,0 +1,63 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AlertmanagerWebSpecApplyConfiguration represents a declarative configuration of the AlertmanagerWebSpec type for use +// with apply. +type AlertmanagerWebSpecApplyConfiguration struct { + WebConfigFileFieldsApplyConfiguration `json:",inline"` + GetConcurrency *uint32 `json:"getConcurrency,omitempty"` + Timeout *uint32 `json:"timeout,omitempty"` +} + +// AlertmanagerWebSpecApplyConfiguration constructs a declarative configuration of the AlertmanagerWebSpec type for use with +// apply. +func AlertmanagerWebSpec() *AlertmanagerWebSpecApplyConfiguration { + return &AlertmanagerWebSpecApplyConfiguration{} +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *AlertmanagerWebSpecApplyConfiguration) WithTLSConfig(value *WebTLSConfigApplyConfiguration) *AlertmanagerWebSpecApplyConfiguration { + b.WebConfigFileFieldsApplyConfiguration.TLSConfig = value + return b +} + +// WithHTTPConfig sets the HTTPConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPConfig field is set to the value of the last call. +func (b *AlertmanagerWebSpecApplyConfiguration) WithHTTPConfig(value *WebHTTPConfigApplyConfiguration) *AlertmanagerWebSpecApplyConfiguration { + b.WebConfigFileFieldsApplyConfiguration.HTTPConfig = value + return b +} + +// WithGetConcurrency sets the GetConcurrency field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GetConcurrency field is set to the value of the last call. +func (b *AlertmanagerWebSpecApplyConfiguration) WithGetConcurrency(value uint32) *AlertmanagerWebSpecApplyConfiguration { + b.GetConcurrency = &value + return b +} + +// WithTimeout sets the Timeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Timeout field is set to the value of the last call. 
+func (b *AlertmanagerWebSpecApplyConfiguration) WithTimeout(value uint32) *AlertmanagerWebSpecApplyConfiguration { + b.Timeout = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/apiserverconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/apiserverconfig.go new file mode 100644 index 0000000000..b6f2915140 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/apiserverconfig.go @@ -0,0 +1,125 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// APIServerConfigApplyConfiguration represents a declarative configuration of the APIServerConfig type for use +// with apply. +type APIServerConfigApplyConfiguration struct { + Host *string `json:"host,omitempty"` + BasicAuth *BasicAuthApplyConfiguration `json:"basicAuth,omitempty"` + BearerTokenFile *string `json:"bearerTokenFile,omitempty"` + TLSConfig *TLSConfigApplyConfiguration `json:"tlsConfig,omitempty"` + Authorization *AuthorizationApplyConfiguration `json:"authorization,omitempty"` + BearerToken *string `json:"bearerToken,omitempty"` + ProxyConfigApplyConfiguration `json:",inline"` +} + +// APIServerConfigApplyConfiguration constructs a declarative configuration of the APIServerConfig type for use with +// apply. +func APIServerConfig() *APIServerConfigApplyConfiguration { + return &APIServerConfigApplyConfiguration{} +} + +// WithHost sets the Host field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Host field is set to the value of the last call. +func (b *APIServerConfigApplyConfiguration) WithHost(value string) *APIServerConfigApplyConfiguration { + b.Host = &value + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *APIServerConfigApplyConfiguration) WithBasicAuth(value *BasicAuthApplyConfiguration) *APIServerConfigApplyConfiguration { + b.BasicAuth = value + return b +} + +// WithBearerTokenFile sets the BearerTokenFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerTokenFile field is set to the value of the last call. 
+func (b *APIServerConfigApplyConfiguration) WithBearerTokenFile(value string) *APIServerConfigApplyConfiguration { + b.BearerTokenFile = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *APIServerConfigApplyConfiguration) WithTLSConfig(value *TLSConfigApplyConfiguration) *APIServerConfigApplyConfiguration { + b.TLSConfig = value + return b +} + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *APIServerConfigApplyConfiguration) WithAuthorization(value *AuthorizationApplyConfiguration) *APIServerConfigApplyConfiguration { + b.Authorization = value + return b +} + +// WithBearerToken sets the BearerToken field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerToken field is set to the value of the last call. +func (b *APIServerConfigApplyConfiguration) WithBearerToken(value string) *APIServerConfigApplyConfiguration { + b.BearerToken = &value + return b +} + +// WithProxyURL sets the ProxyURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyURL field is set to the value of the last call. +func (b *APIServerConfigApplyConfiguration) WithProxyURL(value string) *APIServerConfigApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyURL = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *APIServerConfigApplyConfiguration) WithNoProxy(value string) *APIServerConfigApplyConfiguration { + b.ProxyConfigApplyConfiguration.NoProxy = &value + return b +} + +// WithProxyFromEnvironment sets the ProxyFromEnvironment field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyFromEnvironment field is set to the value of the last call. +func (b *APIServerConfigApplyConfiguration) WithProxyFromEnvironment(value bool) *APIServerConfigApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyFromEnvironment = &value + return b +} + +// WithProxyConnectHeader puts the entries into the ProxyConnectHeader field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ProxyConnectHeader field, +// overwriting an existing map entries in ProxyConnectHeader field with the same key. 
+func (b *APIServerConfigApplyConfiguration) WithProxyConnectHeader(entries map[string][]corev1.SecretKeySelector) *APIServerConfigApplyConfiguration { + if b.ProxyConfigApplyConfiguration.ProxyConnectHeader == nil && len(entries) > 0 { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader = make(map[string][]corev1.SecretKeySelector, len(entries)) + } + for k, v := range entries { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader[k] = v + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/arbitraryfsaccessthroughsmsconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/arbitraryfsaccessthroughsmsconfig.go new file mode 100644 index 0000000000..3c63d01a9b --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/arbitraryfsaccessthroughsmsconfig.go @@ -0,0 +1,37 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ArbitraryFSAccessThroughSMsConfigApplyConfiguration represents a declarative configuration of the ArbitraryFSAccessThroughSMsConfig type for use +// with apply. +type ArbitraryFSAccessThroughSMsConfigApplyConfiguration struct { + Deny *bool `json:"deny,omitempty"` +} + +// ArbitraryFSAccessThroughSMsConfigApplyConfiguration constructs a declarative configuration of the ArbitraryFSAccessThroughSMsConfig type for use with +// apply. +func ArbitraryFSAccessThroughSMsConfig() *ArbitraryFSAccessThroughSMsConfigApplyConfiguration { + return &ArbitraryFSAccessThroughSMsConfigApplyConfiguration{} +} + +// WithDeny sets the Deny field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Deny field is set to the value of the last call. +func (b *ArbitraryFSAccessThroughSMsConfigApplyConfiguration) WithDeny(value bool) *ArbitraryFSAccessThroughSMsConfigApplyConfiguration { + b.Deny = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/argument.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/argument.go new file mode 100644 index 0000000000..f1d9b91455 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/argument.go @@ -0,0 +1,46 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ArgumentApplyConfiguration represents a declarative configuration of the Argument type for use +// with apply. +type ArgumentApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// ArgumentApplyConfiguration constructs a declarative configuration of the Argument type for use with +// apply. +func Argument() *ArgumentApplyConfiguration { + return &ArgumentApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ArgumentApplyConfiguration) WithName(value string) *ArgumentApplyConfiguration { + b.Name = &value + return b +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *ArgumentApplyConfiguration) WithValue(value string) *ArgumentApplyConfiguration { + b.Value = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/attachmetadata.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/attachmetadata.go new file mode 100644 index 0000000000..ff71219de9 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/attachmetadata.go @@ -0,0 +1,37 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AttachMetadataApplyConfiguration represents a declarative configuration of the AttachMetadata type for use +// with apply. +type AttachMetadataApplyConfiguration struct { + Node *bool `json:"node,omitempty"` +} + +// AttachMetadataApplyConfiguration constructs a declarative configuration of the AttachMetadata type for use with +// apply. +func AttachMetadata() *AttachMetadataApplyConfiguration { + return &AttachMetadataApplyConfiguration{} +} + +// WithNode sets the Node field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Node field is set to the value of the last call. +func (b *AttachMetadataApplyConfiguration) WithNode(value bool) *AttachMetadataApplyConfiguration { + b.Node = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/authorization.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/authorization.go new file mode 100644 index 0000000000..96ffd3d4ad --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/authorization.go @@ -0,0 +1,58 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// AuthorizationApplyConfiguration represents a declarative configuration of the Authorization type for use +// with apply. +type AuthorizationApplyConfiguration struct { + SafeAuthorizationApplyConfiguration `json:",inline"` + CredentialsFile *string `json:"credentialsFile,omitempty"` +} + +// AuthorizationApplyConfiguration constructs a declarative configuration of the Authorization type for use with +// apply. +func Authorization() *AuthorizationApplyConfiguration { + return &AuthorizationApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *AuthorizationApplyConfiguration) WithType(value string) *AuthorizationApplyConfiguration { + b.SafeAuthorizationApplyConfiguration.Type = &value + return b +} + +// WithCredentials sets the Credentials field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Credentials field is set to the value of the last call. +func (b *AuthorizationApplyConfiguration) WithCredentials(value corev1.SecretKeySelector) *AuthorizationApplyConfiguration { + b.SafeAuthorizationApplyConfiguration.Credentials = &value + return b +} + +// WithCredentialsFile sets the CredentialsFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CredentialsFile field is set to the value of the last call. 
+func (b *AuthorizationApplyConfiguration) WithCredentialsFile(value string) *AuthorizationApplyConfiguration { + b.CredentialsFile = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/azuread.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/azuread.go new file mode 100644 index 0000000000..22e0b20ef8 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/azuread.go @@ -0,0 +1,82 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AzureADApplyConfiguration represents a declarative configuration of the AzureAD type for use +// with apply. +type AzureADApplyConfiguration struct { + Cloud *string `json:"cloud,omitempty"` + ManagedIdentity *ManagedIdentityApplyConfiguration `json:"managedIdentity,omitempty"` + OAuth *AzureOAuthApplyConfiguration `json:"oauth,omitempty"` + SDK *AzureSDKApplyConfiguration `json:"sdk,omitempty"` + WorkloadIdentity *AzureWorkloadIdentityApplyConfiguration `json:"workloadIdentity,omitempty"` + Scope *string `json:"scope,omitempty"` +} + +// AzureADApplyConfiguration constructs a declarative configuration of the AzureAD type for use with +// apply. +func AzureAD() *AzureADApplyConfiguration { + return &AzureADApplyConfiguration{} +} + +// WithCloud sets the Cloud field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Cloud field is set to the value of the last call. +func (b *AzureADApplyConfiguration) WithCloud(value string) *AzureADApplyConfiguration { + b.Cloud = &value + return b +} + +// WithManagedIdentity sets the ManagedIdentity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagedIdentity field is set to the value of the last call. +func (b *AzureADApplyConfiguration) WithManagedIdentity(value *ManagedIdentityApplyConfiguration) *AzureADApplyConfiguration { + b.ManagedIdentity = value + return b +} + +// WithOAuth sets the OAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OAuth field is set to the value of the last call. +func (b *AzureADApplyConfiguration) WithOAuth(value *AzureOAuthApplyConfiguration) *AzureADApplyConfiguration { + b.OAuth = value + return b +} + +// WithSDK sets the SDK field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the SDK field is set to the value of the last call. +func (b *AzureADApplyConfiguration) WithSDK(value *AzureSDKApplyConfiguration) *AzureADApplyConfiguration { + b.SDK = value + return b +} + +// WithWorkloadIdentity sets the WorkloadIdentity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WorkloadIdentity field is set to the value of the last call. +func (b *AzureADApplyConfiguration) WithWorkloadIdentity(value *AzureWorkloadIdentityApplyConfiguration) *AzureADApplyConfiguration { + b.WorkloadIdentity = value + return b +} + +// WithScope sets the Scope field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scope field is set to the value of the last call. +func (b *AzureADApplyConfiguration) WithScope(value string) *AzureADApplyConfiguration { + b.Scope = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/azureoauth.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/azureoauth.go new file mode 100644 index 0000000000..6ad42ec56b --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/azureoauth.go @@ -0,0 +1,59 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// AzureOAuthApplyConfiguration represents a declarative configuration of the AzureOAuth type for use +// with apply. +type AzureOAuthApplyConfiguration struct { + ClientID *string `json:"clientId,omitempty"` + ClientSecret *corev1.SecretKeySelector `json:"clientSecret,omitempty"` + TenantID *string `json:"tenantId,omitempty"` +} + +// AzureOAuthApplyConfiguration constructs a declarative configuration of the AzureOAuth type for use with +// apply. +func AzureOAuth() *AzureOAuthApplyConfiguration { + return &AzureOAuthApplyConfiguration{} +} + +// WithClientID sets the ClientID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientID field is set to the value of the last call. +func (b *AzureOAuthApplyConfiguration) WithClientID(value string) *AzureOAuthApplyConfiguration { + b.ClientID = &value + return b +} + +// WithClientSecret sets the ClientSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientSecret field is set to the value of the last call. 
+func (b *AzureOAuthApplyConfiguration) WithClientSecret(value corev1.SecretKeySelector) *AzureOAuthApplyConfiguration { + b.ClientSecret = &value + return b +} + +// WithTenantID sets the TenantID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TenantID field is set to the value of the last call. +func (b *AzureOAuthApplyConfiguration) WithTenantID(value string) *AzureOAuthApplyConfiguration { + b.TenantID = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/azuresdk.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/azuresdk.go new file mode 100644 index 0000000000..51f71793de --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/azuresdk.go @@ -0,0 +1,37 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AzureSDKApplyConfiguration represents a declarative configuration of the AzureSDK type for use +// with apply. +type AzureSDKApplyConfiguration struct { + TenantID *string `json:"tenantId,omitempty"` +} + +// AzureSDKApplyConfiguration constructs a declarative configuration of the AzureSDK type for use with +// apply. +func AzureSDK() *AzureSDKApplyConfiguration { + return &AzureSDKApplyConfiguration{} +} + +// WithTenantID sets the TenantID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TenantID field is set to the value of the last call. +func (b *AzureSDKApplyConfiguration) WithTenantID(value string) *AzureSDKApplyConfiguration { + b.TenantID = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/azureworkloadidentity.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/azureworkloadidentity.go new file mode 100644 index 0000000000..0db86616bf --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/azureworkloadidentity.go @@ -0,0 +1,46 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AzureWorkloadIdentityApplyConfiguration represents a declarative configuration of the AzureWorkloadIdentity type for use +// with apply. +type AzureWorkloadIdentityApplyConfiguration struct { + ClientID *string `json:"clientId,omitempty"` + TenantID *string `json:"tenantId,omitempty"` +} + +// AzureWorkloadIdentityApplyConfiguration constructs a declarative configuration of the AzureWorkloadIdentity type for use with +// apply. +func AzureWorkloadIdentity() *AzureWorkloadIdentityApplyConfiguration { + return &AzureWorkloadIdentityApplyConfiguration{} +} + +// WithClientID sets the ClientID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientID field is set to the value of the last call. +func (b *AzureWorkloadIdentityApplyConfiguration) WithClientID(value string) *AzureWorkloadIdentityApplyConfiguration { + b.ClientID = &value + return b +} + +// WithTenantID sets the TenantID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TenantID field is set to the value of the last call. +func (b *AzureWorkloadIdentityApplyConfiguration) WithTenantID(value string) *AzureWorkloadIdentityApplyConfiguration { + b.TenantID = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/basicauth.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/basicauth.go new file mode 100644 index 0000000000..88f0a3bf40 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/basicauth.go @@ -0,0 +1,50 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// BasicAuthApplyConfiguration represents a declarative configuration of the BasicAuth type for use +// with apply. +type BasicAuthApplyConfiguration struct { + Username *corev1.SecretKeySelector `json:"username,omitempty"` + Password *corev1.SecretKeySelector `json:"password,omitempty"` +} + +// BasicAuthApplyConfiguration constructs a declarative configuration of the BasicAuth type for use with +// apply. +func BasicAuth() *BasicAuthApplyConfiguration { + return &BasicAuthApplyConfiguration{} +} + +// WithUsername sets the Username field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Username field is set to the value of the last call. +func (b *BasicAuthApplyConfiguration) WithUsername(value corev1.SecretKeySelector) *BasicAuthApplyConfiguration { + b.Username = &value + return b +} + +// WithPassword sets the Password field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Password field is set to the value of the last call. +func (b *BasicAuthApplyConfiguration) WithPassword(value corev1.SecretKeySelector) *BasicAuthApplyConfiguration { + b.Password = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/clustertlsconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/clustertlsconfig.go new file mode 100644 index 0000000000..a76a56e907 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/clustertlsconfig.go @@ -0,0 +1,46 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ClusterTLSConfigApplyConfiguration represents a declarative configuration of the ClusterTLSConfig type for use +// with apply. +type ClusterTLSConfigApplyConfiguration struct { + ServerTLS *WebTLSConfigApplyConfiguration `json:"server,omitempty"` + ClientTLS *SafeTLSConfigApplyConfiguration `json:"client,omitempty"` +} + +// ClusterTLSConfigApplyConfiguration constructs a declarative configuration of the ClusterTLSConfig type for use with +// apply. +func ClusterTLSConfig() *ClusterTLSConfigApplyConfiguration { + return &ClusterTLSConfigApplyConfiguration{} +} + +// WithServerTLS sets the ServerTLS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServerTLS field is set to the value of the last call. +func (b *ClusterTLSConfigApplyConfiguration) WithServerTLS(value *WebTLSConfigApplyConfiguration) *ClusterTLSConfigApplyConfiguration { + b.ServerTLS = value + return b +} + +// WithClientTLS sets the ClientTLS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientTLS field is set to the value of the last call. 
+func (b *ClusterTLSConfigApplyConfiguration) WithClientTLS(value *SafeTLSConfigApplyConfiguration) *ClusterTLSConfigApplyConfiguration { + b.ClientTLS = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/commonprometheusfields.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/commonprometheusfields.go new file mode 100644 index 0000000000..07d2e30897 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/commonprometheusfields.go @@ -0,0 +1,1010 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// CommonPrometheusFieldsApplyConfiguration represents a declarative configuration of the CommonPrometheusFields type for use +// with apply. +type CommonPrometheusFieldsApplyConfiguration struct { + PodMetadata *EmbeddedObjectMetadataApplyConfiguration `json:"podMetadata,omitempty"` + ServiceMonitorSelector *metav1.LabelSelectorApplyConfiguration `json:"serviceMonitorSelector,omitempty"` + ServiceMonitorNamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"serviceMonitorNamespaceSelector,omitempty"` + PodMonitorSelector *metav1.LabelSelectorApplyConfiguration `json:"podMonitorSelector,omitempty"` + PodMonitorNamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"podMonitorNamespaceSelector,omitempty"` + ProbeSelector *metav1.LabelSelectorApplyConfiguration `json:"probeSelector,omitempty"` + ProbeNamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"probeNamespaceSelector,omitempty"` + ScrapeConfigSelector *metav1.LabelSelectorApplyConfiguration `json:"scrapeConfigSelector,omitempty"` + ScrapeConfigNamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"scrapeConfigNamespaceSelector,omitempty"` + Version *string `json:"version,omitempty"` + Paused *bool `json:"paused,omitempty"` + Image *string `json:"image,omitempty"` + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + Shards *int32 `json:"shards,omitempty"` + ReplicaExternalLabelName *string `json:"replicaExternalLabelName,omitempty"` + PrometheusExternalLabelName *string `json:"prometheusExternalLabelName,omitempty"` + LogLevel *string `json:"logLevel,omitempty"` + LogFormat *string `json:"logFormat,omitempty"` + ScrapeInterval *monitoringv1.Duration `json:"scrapeInterval,omitempty"` + ScrapeTimeout *monitoringv1.Duration `json:"scrapeTimeout,omitempty"` + ScrapeProtocols []monitoringv1.ScrapeProtocol 
`json:"scrapeProtocols,omitempty"` + ExternalLabels map[string]string `json:"externalLabels,omitempty"` + EnableRemoteWriteReceiver *bool `json:"enableRemoteWriteReceiver,omitempty"` + EnableOTLPReceiver *bool `json:"enableOTLPReceiver,omitempty"` + RemoteWriteReceiverMessageVersions []monitoringv1.RemoteWriteMessageVersion `json:"remoteWriteReceiverMessageVersions,omitempty"` + EnableFeatures []monitoringv1.EnableFeature `json:"enableFeatures,omitempty"` + ExternalURL *string `json:"externalUrl,omitempty"` + RoutePrefix *string `json:"routePrefix,omitempty"` + Storage *StorageSpecApplyConfiguration `json:"storage,omitempty"` + Volumes []corev1.Volume `json:"volumes,omitempty"` + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` + PersistentVolumeClaimRetentionPolicy *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + Web *PrometheusWebSpecApplyConfiguration `json:"web,omitempty"` + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + ServiceAccountName *string `json:"serviceAccountName,omitempty"` + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` + Secrets []string `json:"secrets,omitempty"` + ConfigMaps []string `json:"configMaps,omitempty"` + Affinity *corev1.Affinity `json:"affinity,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + TopologySpreadConstraints []TopologySpreadConstraintApplyConfiguration `json:"topologySpreadConstraints,omitempty"` + RemoteWrite []RemoteWriteSpecApplyConfiguration `json:"remoteWrite,omitempty"` + OTLP *OTLPConfigApplyConfiguration `json:"otlp,omitempty"` + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + DNSPolicy *monitoringv1.DNSPolicy `json:"dnsPolicy,omitempty"` + DNSConfig *PodDNSConfigApplyConfiguration `json:"dnsConfig,omitempty"` + ListenLocal *bool `json:"listenLocal,omitempty"` + PodManagementPolicy *monitoringv1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` + Containers []corev1.Container `json:"containers,omitempty"` + InitContainers []corev1.Container `json:"initContainers,omitempty"` + AdditionalScrapeConfigs *corev1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"` + APIServerConfig *APIServerConfigApplyConfiguration `json:"apiserverConfig,omitempty"` + PriorityClassName *string `json:"priorityClassName,omitempty"` + PortName *string `json:"portName,omitempty"` + ArbitraryFSAccessThroughSMs *ArbitraryFSAccessThroughSMsConfigApplyConfiguration `json:"arbitraryFSAccessThroughSMs,omitempty"` + OverrideHonorLabels *bool `json:"overrideHonorLabels,omitempty"` + OverrideHonorTimestamps *bool `json:"overrideHonorTimestamps,omitempty"` + IgnoreNamespaceSelectors *bool `json:"ignoreNamespaceSelectors,omitempty"` + EnforcedNamespaceLabel *string `json:"enforcedNamespaceLabel,omitempty"` + EnforcedSampleLimit *uint64 `json:"enforcedSampleLimit,omitempty"` + EnforcedTargetLimit *uint64 `json:"enforcedTargetLimit,omitempty"` + EnforcedLabelLimit *uint64 `json:"enforcedLabelLimit,omitempty"` + EnforcedLabelNameLengthLimit *uint64 `json:"enforcedLabelNameLengthLimit,omitempty"` + EnforcedLabelValueLengthLimit *uint64 `json:"enforcedLabelValueLengthLimit,omitempty"` + EnforcedKeepDroppedTargets *uint64 
`json:"enforcedKeepDroppedTargets,omitempty"` + EnforcedBodySizeLimit *monitoringv1.ByteSize `json:"enforcedBodySizeLimit,omitempty"` + NameValidationScheme *monitoringv1.NameValidationSchemeOptions `json:"nameValidationScheme,omitempty"` + NameEscapingScheme *monitoringv1.NameEscapingSchemeOptions `json:"nameEscapingScheme,omitempty"` + ConvertClassicHistogramsToNHCB *bool `json:"convertClassicHistogramsToNHCB,omitempty"` + ScrapeNativeHistograms *bool `json:"scrapeNativeHistograms,omitempty"` + ScrapeClassicHistograms *bool `json:"scrapeClassicHistograms,omitempty"` + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + HostAliases []HostAliasApplyConfiguration `json:"hostAliases,omitempty"` + AdditionalArgs []ArgumentApplyConfiguration `json:"additionalArgs,omitempty"` + WALCompression *bool `json:"walCompression,omitempty"` + ExcludedFromEnforcement []ObjectReferenceApplyConfiguration `json:"excludedFromEnforcement,omitempty"` + HostNetwork *bool `json:"hostNetwork,omitempty"` + PodTargetLabels []string `json:"podTargetLabels,omitempty"` + TracingConfig *TracingConfigApplyConfiguration `json:"tracingConfig,omitempty"` + BodySizeLimit *monitoringv1.ByteSize `json:"bodySizeLimit,omitempty"` + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + TargetLimit *uint64 `json:"targetLimit,omitempty"` + LabelLimit *uint64 `json:"labelLimit,omitempty"` + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + ReloadStrategy *monitoringv1.ReloadStrategyType `json:"reloadStrategy,omitempty"` + MaximumStartupDurationSeconds *int32 `json:"maximumStartupDurationSeconds,omitempty"` + ScrapeClasses []ScrapeClassApplyConfiguration `json:"scrapeClasses,omitempty"` + ServiceDiscoveryRole *monitoringv1.ServiceDiscoveryRole `json:"serviceDiscoveryRole,omitempty"` + TSDB *TSDBSpecApplyConfiguration `json:"tsdb,omitempty"` + ScrapeFailureLogFile *string `json:"scrapeFailureLogFile,omitempty"` + ServiceName *string `json:"serviceName,omitempty"` + Runtime *RuntimeConfigApplyConfiguration `json:"runtime,omitempty"` + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + HostUsers *bool `json:"hostUsers,omitempty"` +} + +// CommonPrometheusFieldsApplyConfiguration constructs a declarative configuration of the CommonPrometheusFields type for use with +// apply. +func CommonPrometheusFields() *CommonPrometheusFieldsApplyConfiguration { + return &CommonPrometheusFieldsApplyConfiguration{} +} + +// WithPodMetadata sets the PodMetadata field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodMetadata field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithPodMetadata(value *EmbeddedObjectMetadataApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.PodMetadata = value + return b +} + +// WithServiceMonitorSelector sets the ServiceMonitorSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceMonitorSelector field is set to the value of the last call. 
+func (b *CommonPrometheusFieldsApplyConfiguration) WithServiceMonitorSelector(value *metav1.LabelSelectorApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.ServiceMonitorSelector = value + return b +} + +// WithServiceMonitorNamespaceSelector sets the ServiceMonitorNamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceMonitorNamespaceSelector field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithServiceMonitorNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.ServiceMonitorNamespaceSelector = value + return b +} + +// WithPodMonitorSelector sets the PodMonitorSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodMonitorSelector field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithPodMonitorSelector(value *metav1.LabelSelectorApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.PodMonitorSelector = value + return b +} + +// WithPodMonitorNamespaceSelector sets the PodMonitorNamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodMonitorNamespaceSelector field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithPodMonitorNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.PodMonitorNamespaceSelector = value + return b +} + +// WithProbeSelector sets the ProbeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProbeSelector field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithProbeSelector(value *metav1.LabelSelectorApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.ProbeSelector = value + return b +} + +// WithProbeNamespaceSelector sets the ProbeNamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProbeNamespaceSelector field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithProbeNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.ProbeNamespaceSelector = value + return b +} + +// WithScrapeConfigSelector sets the ScrapeConfigSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeConfigSelector field is set to the value of the last call. 
+func (b *CommonPrometheusFieldsApplyConfiguration) WithScrapeConfigSelector(value *metav1.LabelSelectorApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.ScrapeConfigSelector = value + return b +} + +// WithScrapeConfigNamespaceSelector sets the ScrapeConfigNamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeConfigNamespaceSelector field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithScrapeConfigNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.ScrapeConfigNamespaceSelector = value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithVersion(value string) *CommonPrometheusFieldsApplyConfiguration { + b.Version = &value + return b +} + +// WithPaused sets the Paused field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Paused field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithPaused(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.Paused = &value + return b +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithImage(value string) *CommonPrometheusFieldsApplyConfiguration { + b.Image = &value + return b +} + +// WithImagePullPolicy sets the ImagePullPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImagePullPolicy field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithImagePullPolicy(value corev1.PullPolicy) *CommonPrometheusFieldsApplyConfiguration { + b.ImagePullPolicy = &value + return b +} + +// WithImagePullSecrets adds the given value to the ImagePullSecrets field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ImagePullSecrets field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithImagePullSecrets(values ...corev1.LocalObjectReference) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + b.ImagePullSecrets = append(b.ImagePullSecrets, values[i]) + } + return b +} + +// WithReplicas sets the Replicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Replicas field is set to the value of the last call. 
+func (b *CommonPrometheusFieldsApplyConfiguration) WithReplicas(value int32) *CommonPrometheusFieldsApplyConfiguration { + b.Replicas = &value + return b +} + +// WithShards sets the Shards field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Shards field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithShards(value int32) *CommonPrometheusFieldsApplyConfiguration { + b.Shards = &value + return b +} + +// WithReplicaExternalLabelName sets the ReplicaExternalLabelName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReplicaExternalLabelName field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithReplicaExternalLabelName(value string) *CommonPrometheusFieldsApplyConfiguration { + b.ReplicaExternalLabelName = &value + return b +} + +// WithPrometheusExternalLabelName sets the PrometheusExternalLabelName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PrometheusExternalLabelName field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithPrometheusExternalLabelName(value string) *CommonPrometheusFieldsApplyConfiguration { + b.PrometheusExternalLabelName = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithLogLevel(value string) *CommonPrometheusFieldsApplyConfiguration { + b.LogLevel = &value + return b +} + +// WithLogFormat sets the LogFormat field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogFormat field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithLogFormat(value string) *CommonPrometheusFieldsApplyConfiguration { + b.LogFormat = &value + return b +} + +// WithScrapeInterval sets the ScrapeInterval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeInterval field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithScrapeInterval(value monitoringv1.Duration) *CommonPrometheusFieldsApplyConfiguration { + b.ScrapeInterval = &value + return b +} + +// WithScrapeTimeout sets the ScrapeTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeTimeout field is set to the value of the last call. 
+func (b *CommonPrometheusFieldsApplyConfiguration) WithScrapeTimeout(value monitoringv1.Duration) *CommonPrometheusFieldsApplyConfiguration { + b.ScrapeTimeout = &value + return b +} + +// WithScrapeProtocols adds the given value to the ScrapeProtocols field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ScrapeProtocols field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithScrapeProtocols(values ...monitoringv1.ScrapeProtocol) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + b.ScrapeProtocols = append(b.ScrapeProtocols, values[i]) + } + return b +} + +// WithExternalLabels puts the entries into the ExternalLabels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ExternalLabels field, +// overwriting an existing map entries in ExternalLabels field with the same key. +func (b *CommonPrometheusFieldsApplyConfiguration) WithExternalLabels(entries map[string]string) *CommonPrometheusFieldsApplyConfiguration { + if b.ExternalLabels == nil && len(entries) > 0 { + b.ExternalLabels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ExternalLabels[k] = v + } + return b +} + +// WithEnableRemoteWriteReceiver sets the EnableRemoteWriteReceiver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableRemoteWriteReceiver field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithEnableRemoteWriteReceiver(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.EnableRemoteWriteReceiver = &value + return b +} + +// WithEnableOTLPReceiver sets the EnableOTLPReceiver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableOTLPReceiver field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithEnableOTLPReceiver(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.EnableOTLPReceiver = &value + return b +} + +// WithRemoteWriteReceiverMessageVersions adds the given value to the RemoteWriteReceiverMessageVersions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RemoteWriteReceiverMessageVersions field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithRemoteWriteReceiverMessageVersions(values ...monitoringv1.RemoteWriteMessageVersion) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + b.RemoteWriteReceiverMessageVersions = append(b.RemoteWriteReceiverMessageVersions, values[i]) + } + return b +} + +// WithEnableFeatures adds the given value to the EnableFeatures field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the EnableFeatures field. 
+func (b *CommonPrometheusFieldsApplyConfiguration) WithEnableFeatures(values ...monitoringv1.EnableFeature) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + b.EnableFeatures = append(b.EnableFeatures, values[i]) + } + return b +} + +// WithExternalURL sets the ExternalURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExternalURL field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithExternalURL(value string) *CommonPrometheusFieldsApplyConfiguration { + b.ExternalURL = &value + return b +} + +// WithRoutePrefix sets the RoutePrefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RoutePrefix field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithRoutePrefix(value string) *CommonPrometheusFieldsApplyConfiguration { + b.RoutePrefix = &value + return b +} + +// WithStorage sets the Storage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Storage field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithStorage(value *StorageSpecApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.Storage = value + return b +} + +// WithVolumes adds the given value to the Volumes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Volumes field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithVolumes(values ...corev1.Volume) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + b.Volumes = append(b.Volumes, values[i]) + } + return b +} + +// WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the VolumeMounts field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithVolumeMounts(values ...corev1.VolumeMount) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + b.VolumeMounts = append(b.VolumeMounts, values[i]) + } + return b +} + +// WithPersistentVolumeClaimRetentionPolicy sets the PersistentVolumeClaimRetentionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PersistentVolumeClaimRetentionPolicy field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithPersistentVolumeClaimRetentionPolicy(value appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy) *CommonPrometheusFieldsApplyConfiguration { + b.PersistentVolumeClaimRetentionPolicy = &value + return b +} + +// WithWeb sets the Web field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Web field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithWeb(value *PrometheusWebSpecApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.Web = value + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithResources(value corev1.ResourceRequirements) *CommonPrometheusFieldsApplyConfiguration { + b.Resources = &value + return b +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. +func (b *CommonPrometheusFieldsApplyConfiguration) WithNodeSelector(entries map[string]string) *CommonPrometheusFieldsApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithServiceAccountName sets the ServiceAccountName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceAccountName field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithServiceAccountName(value string) *CommonPrometheusFieldsApplyConfiguration { + b.ServiceAccountName = &value + return b +} + +// WithAutomountServiceAccountToken sets the AutomountServiceAccountToken field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AutomountServiceAccountToken field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithAutomountServiceAccountToken(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.AutomountServiceAccountToken = &value + return b +} + +// WithSecrets adds the given value to the Secrets field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Secrets field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithSecrets(values ...string) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + b.Secrets = append(b.Secrets, values[i]) + } + return b +} + +// WithConfigMaps adds the given value to the ConfigMaps field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ConfigMaps field. 
+func (b *CommonPrometheusFieldsApplyConfiguration) WithConfigMaps(values ...string) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + b.ConfigMaps = append(b.ConfigMaps, values[i]) + } + return b +} + +// WithAffinity sets the Affinity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Affinity field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithAffinity(value corev1.Affinity) *CommonPrometheusFieldsApplyConfiguration { + b.Affinity = &value + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithTolerations(values ...corev1.Toleration) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} + +// WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithTopologySpreadConstraints(values ...*TopologySpreadConstraintApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithTopologySpreadConstraints") + } + b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, *values[i]) + } + return b +} + +// WithRemoteWrite adds the given value to the RemoteWrite field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RemoteWrite field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithRemoteWrite(values ...*RemoteWriteSpecApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRemoteWrite") + } + b.RemoteWrite = append(b.RemoteWrite, *values[i]) + } + return b +} + +// WithOTLP sets the OTLP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OTLP field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithOTLP(value *OTLPConfigApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.OTLP = value + return b +} + +// WithSecurityContext sets the SecurityContext field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SecurityContext field is set to the value of the last call. 
+func (b *CommonPrometheusFieldsApplyConfiguration) WithSecurityContext(value corev1.PodSecurityContext) *CommonPrometheusFieldsApplyConfiguration { + b.SecurityContext = &value + return b +} + +// WithDNSPolicy sets the DNSPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSPolicy field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithDNSPolicy(value monitoringv1.DNSPolicy) *CommonPrometheusFieldsApplyConfiguration { + b.DNSPolicy = &value + return b +} + +// WithDNSConfig sets the DNSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSConfig field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithDNSConfig(value *PodDNSConfigApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.DNSConfig = value + return b +} + +// WithListenLocal sets the ListenLocal field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ListenLocal field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithListenLocal(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.ListenLocal = &value + return b +} + +// WithPodManagementPolicy sets the PodManagementPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodManagementPolicy field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithPodManagementPolicy(value monitoringv1.PodManagementPolicyType) *CommonPrometheusFieldsApplyConfiguration { + b.PodManagementPolicy = &value + return b +} + +// WithUpdateStrategy sets the UpdateStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UpdateStrategy field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithUpdateStrategy(value *StatefulSetUpdateStrategyApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.UpdateStrategy = value + return b +} + +// WithEnableServiceLinks sets the EnableServiceLinks field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableServiceLinks field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithEnableServiceLinks(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.EnableServiceLinks = &value + return b +} + +// WithContainers adds the given value to the Containers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Containers field. 
+func (b *CommonPrometheusFieldsApplyConfiguration) WithContainers(values ...corev1.Container) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + b.Containers = append(b.Containers, values[i]) + } + return b +} + +// WithInitContainers adds the given value to the InitContainers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the InitContainers field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithInitContainers(values ...corev1.Container) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + b.InitContainers = append(b.InitContainers, values[i]) + } + return b +} + +// WithAdditionalScrapeConfigs sets the AdditionalScrapeConfigs field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdditionalScrapeConfigs field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithAdditionalScrapeConfigs(value corev1.SecretKeySelector) *CommonPrometheusFieldsApplyConfiguration { + b.AdditionalScrapeConfigs = &value + return b +} + +// WithAPIServerConfig sets the APIServerConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIServerConfig field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithAPIServerConfig(value *APIServerConfigApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.APIServerConfig = value + return b +} + +// WithPriorityClassName sets the PriorityClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PriorityClassName field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithPriorityClassName(value string) *CommonPrometheusFieldsApplyConfiguration { + b.PriorityClassName = &value + return b +} + +// WithPortName sets the PortName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PortName field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithPortName(value string) *CommonPrometheusFieldsApplyConfiguration { + b.PortName = &value + return b +} + +// WithArbitraryFSAccessThroughSMs sets the ArbitraryFSAccessThroughSMs field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ArbitraryFSAccessThroughSMs field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithArbitraryFSAccessThroughSMs(value *ArbitraryFSAccessThroughSMsConfigApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.ArbitraryFSAccessThroughSMs = value + return b +} + +// WithOverrideHonorLabels sets the OverrideHonorLabels field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the OverrideHonorLabels field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithOverrideHonorLabels(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.OverrideHonorLabels = &value + return b +} + +// WithOverrideHonorTimestamps sets the OverrideHonorTimestamps field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OverrideHonorTimestamps field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithOverrideHonorTimestamps(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.OverrideHonorTimestamps = &value + return b +} + +// WithIgnoreNamespaceSelectors sets the IgnoreNamespaceSelectors field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IgnoreNamespaceSelectors field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithIgnoreNamespaceSelectors(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.IgnoreNamespaceSelectors = &value + return b +} + +// WithEnforcedNamespaceLabel sets the EnforcedNamespaceLabel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedNamespaceLabel field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithEnforcedNamespaceLabel(value string) *CommonPrometheusFieldsApplyConfiguration { + b.EnforcedNamespaceLabel = &value + return b +} + +// WithEnforcedSampleLimit sets the EnforcedSampleLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedSampleLimit field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithEnforcedSampleLimit(value uint64) *CommonPrometheusFieldsApplyConfiguration { + b.EnforcedSampleLimit = &value + return b +} + +// WithEnforcedTargetLimit sets the EnforcedTargetLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedTargetLimit field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithEnforcedTargetLimit(value uint64) *CommonPrometheusFieldsApplyConfiguration { + b.EnforcedTargetLimit = &value + return b +} + +// WithEnforcedLabelLimit sets the EnforcedLabelLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedLabelLimit field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithEnforcedLabelLimit(value uint64) *CommonPrometheusFieldsApplyConfiguration { + b.EnforcedLabelLimit = &value + return b +} + +// WithEnforcedLabelNameLengthLimit sets the EnforcedLabelNameLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the EnforcedLabelNameLengthLimit field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithEnforcedLabelNameLengthLimit(value uint64) *CommonPrometheusFieldsApplyConfiguration { + b.EnforcedLabelNameLengthLimit = &value + return b +} + +// WithEnforcedLabelValueLengthLimit sets the EnforcedLabelValueLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedLabelValueLengthLimit field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithEnforcedLabelValueLengthLimit(value uint64) *CommonPrometheusFieldsApplyConfiguration { + b.EnforcedLabelValueLengthLimit = &value + return b +} + +// WithEnforcedKeepDroppedTargets sets the EnforcedKeepDroppedTargets field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedKeepDroppedTargets field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithEnforcedKeepDroppedTargets(value uint64) *CommonPrometheusFieldsApplyConfiguration { + b.EnforcedKeepDroppedTargets = &value + return b +} + +// WithEnforcedBodySizeLimit sets the EnforcedBodySizeLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedBodySizeLimit field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithEnforcedBodySizeLimit(value monitoringv1.ByteSize) *CommonPrometheusFieldsApplyConfiguration { + b.EnforcedBodySizeLimit = &value + return b +} + +// WithNameValidationScheme sets the NameValidationScheme field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NameValidationScheme field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithNameValidationScheme(value monitoringv1.NameValidationSchemeOptions) *CommonPrometheusFieldsApplyConfiguration { + b.NameValidationScheme = &value + return b +} + +// WithNameEscapingScheme sets the NameEscapingScheme field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NameEscapingScheme field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithNameEscapingScheme(value monitoringv1.NameEscapingSchemeOptions) *CommonPrometheusFieldsApplyConfiguration { + b.NameEscapingScheme = &value + return b +} + +// WithConvertClassicHistogramsToNHCB sets the ConvertClassicHistogramsToNHCB field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConvertClassicHistogramsToNHCB field is set to the value of the last call. 
+func (b *CommonPrometheusFieldsApplyConfiguration) WithConvertClassicHistogramsToNHCB(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.ConvertClassicHistogramsToNHCB = &value + return b +} + +// WithScrapeNativeHistograms sets the ScrapeNativeHistograms field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeNativeHistograms field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithScrapeNativeHistograms(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.ScrapeNativeHistograms = &value + return b +} + +// WithScrapeClassicHistograms sets the ScrapeClassicHistograms field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeClassicHistograms field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithScrapeClassicHistograms(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.ScrapeClassicHistograms = &value + return b +} + +// WithMinReadySeconds sets the MinReadySeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinReadySeconds field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithMinReadySeconds(value int32) *CommonPrometheusFieldsApplyConfiguration { + b.MinReadySeconds = &value + return b +} + +// WithHostAliases adds the given value to the HostAliases field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the HostAliases field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithHostAliases(values ...*HostAliasApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithHostAliases") + } + b.HostAliases = append(b.HostAliases, *values[i]) + } + return b +} + +// WithAdditionalArgs adds the given value to the AdditionalArgs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AdditionalArgs field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithAdditionalArgs(values ...*ArgumentApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAdditionalArgs") + } + b.AdditionalArgs = append(b.AdditionalArgs, *values[i]) + } + return b +} + +// WithWALCompression sets the WALCompression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WALCompression field is set to the value of the last call. 
+func (b *CommonPrometheusFieldsApplyConfiguration) WithWALCompression(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.WALCompression = &value + return b +} + +// WithExcludedFromEnforcement adds the given value to the ExcludedFromEnforcement field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExcludedFromEnforcement field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithExcludedFromEnforcement(values ...*ObjectReferenceApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExcludedFromEnforcement") + } + b.ExcludedFromEnforcement = append(b.ExcludedFromEnforcement, *values[i]) + } + return b +} + +// WithHostNetwork sets the HostNetwork field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostNetwork field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithHostNetwork(value bool) *CommonPrometheusFieldsApplyConfiguration { + b.HostNetwork = &value + return b +} + +// WithPodTargetLabels adds the given value to the PodTargetLabels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PodTargetLabels field. +func (b *CommonPrometheusFieldsApplyConfiguration) WithPodTargetLabels(values ...string) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + b.PodTargetLabels = append(b.PodTargetLabels, values[i]) + } + return b +} + +// WithTracingConfig sets the TracingConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TracingConfig field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithTracingConfig(value *TracingConfigApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.TracingConfig = value + return b +} + +// WithBodySizeLimit sets the BodySizeLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BodySizeLimit field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithBodySizeLimit(value monitoringv1.ByteSize) *CommonPrometheusFieldsApplyConfiguration { + b.BodySizeLimit = &value + return b +} + +// WithSampleLimit sets the SampleLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SampleLimit field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithSampleLimit(value uint64) *CommonPrometheusFieldsApplyConfiguration { + b.SampleLimit = &value + return b +} + +// WithTargetLimit sets the TargetLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the TargetLimit field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithTargetLimit(value uint64) *CommonPrometheusFieldsApplyConfiguration { + b.TargetLimit = &value + return b +} + +// WithLabelLimit sets the LabelLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelLimit field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithLabelLimit(value uint64) *CommonPrometheusFieldsApplyConfiguration { + b.LabelLimit = &value + return b +} + +// WithLabelNameLengthLimit sets the LabelNameLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelNameLengthLimit field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithLabelNameLengthLimit(value uint64) *CommonPrometheusFieldsApplyConfiguration { + b.LabelNameLengthLimit = &value + return b +} + +// WithLabelValueLengthLimit sets the LabelValueLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelValueLengthLimit field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithLabelValueLengthLimit(value uint64) *CommonPrometheusFieldsApplyConfiguration { + b.LabelValueLengthLimit = &value + return b +} + +// WithKeepDroppedTargets sets the KeepDroppedTargets field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeepDroppedTargets field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithKeepDroppedTargets(value uint64) *CommonPrometheusFieldsApplyConfiguration { + b.KeepDroppedTargets = &value + return b +} + +// WithReloadStrategy sets the ReloadStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReloadStrategy field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithReloadStrategy(value monitoringv1.ReloadStrategyType) *CommonPrometheusFieldsApplyConfiguration { + b.ReloadStrategy = &value + return b +} + +// WithMaximumStartupDurationSeconds sets the MaximumStartupDurationSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaximumStartupDurationSeconds field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithMaximumStartupDurationSeconds(value int32) *CommonPrometheusFieldsApplyConfiguration { + b.MaximumStartupDurationSeconds = &value + return b +} + +// WithScrapeClasses adds the given value to the ScrapeClasses field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ScrapeClasses field. 
+func (b *CommonPrometheusFieldsApplyConfiguration) WithScrapeClasses(values ...*ScrapeClassApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithScrapeClasses") + } + b.ScrapeClasses = append(b.ScrapeClasses, *values[i]) + } + return b +} + +// WithServiceDiscoveryRole sets the ServiceDiscoveryRole field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceDiscoveryRole field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithServiceDiscoveryRole(value monitoringv1.ServiceDiscoveryRole) *CommonPrometheusFieldsApplyConfiguration { + b.ServiceDiscoveryRole = &value + return b +} + +// WithTSDB sets the TSDB field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TSDB field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithTSDB(value *TSDBSpecApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.TSDB = value + return b +} + +// WithScrapeFailureLogFile sets the ScrapeFailureLogFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeFailureLogFile field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithScrapeFailureLogFile(value string) *CommonPrometheusFieldsApplyConfiguration { + b.ScrapeFailureLogFile = &value + return b +} + +// WithServiceName sets the ServiceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceName field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithServiceName(value string) *CommonPrometheusFieldsApplyConfiguration { + b.ServiceName = &value + return b +} + +// WithRuntime sets the Runtime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Runtime field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithRuntime(value *RuntimeConfigApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration { + b.Runtime = value + return b +} + +// WithTerminationGracePeriodSeconds sets the TerminationGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TerminationGracePeriodSeconds field is set to the value of the last call. +func (b *CommonPrometheusFieldsApplyConfiguration) WithTerminationGracePeriodSeconds(value int64) *CommonPrometheusFieldsApplyConfiguration { + b.TerminationGracePeriodSeconds = &value + return b +} + +// WithHostUsers sets the HostUsers field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostUsers field is set to the value of the last call. 
+func (b *CommonPrometheusFieldsApplyConfiguration) WithHostUsers(value bool) *CommonPrometheusFieldsApplyConfiguration {
+	b.HostUsers = &value
+	return b
+}
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/condition.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/condition.go
new file mode 100644
index 0000000000..bed7a8074a
--- /dev/null
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/condition.go
@@ -0,0 +1,87 @@
+// Copyright The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ConditionApplyConfiguration represents a declarative configuration of the Condition type for use
+// with apply.
+type ConditionApplyConfiguration struct {
+	Type *monitoringv1.ConditionType `json:"type,omitempty"`
+	Status *monitoringv1.ConditionStatus `json:"status,omitempty"`
+	LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+	Reason *string `json:"reason,omitempty"`
+	Message *string `json:"message,omitempty"`
+	ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+}
+
+// ConditionApplyConfiguration constructs a declarative configuration of the Condition type for use with
+// apply.
+func Condition() *ConditionApplyConfiguration {
+	return &ConditionApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *ConditionApplyConfiguration) WithType(value monitoringv1.ConditionType) *ConditionApplyConfiguration {
+	b.Type = &value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ConditionApplyConfiguration) WithStatus(value monitoringv1.ConditionStatus) *ConditionApplyConfiguration {
+	b.Status = &value
+	return b
+}
+
+// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LastTransitionTime field is set to the value of the last call.
+func (b *ConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *ConditionApplyConfiguration {
+	b.LastTransitionTime = &value
+	return b
+}
+
+// WithReason sets the Reason field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Reason field is set to the value of the last call.
+func (b *ConditionApplyConfiguration) WithReason(value string) *ConditionApplyConfiguration {
+	b.Reason = &value
+	return b
+}
+
+// WithMessage sets the Message field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Message field is set to the value of the last call.
+func (b *ConditionApplyConfiguration) WithMessage(value string) *ConditionApplyConfiguration {
+	b.Message = &value
+	return b
+}
+
+// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ObservedGeneration field is set to the value of the last call.
+func (b *ConditionApplyConfiguration) WithObservedGeneration(value int64) *ConditionApplyConfiguration {
+	b.ObservedGeneration = &value
+	return b
+}
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/configresourcecondition.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/configresourcecondition.go
new file mode 100644
index 0000000000..8a1d5a5ae7
--- /dev/null
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/configresourcecondition.go
@@ -0,0 +1,87 @@
+// Copyright The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ConfigResourceConditionApplyConfiguration represents a declarative configuration of the ConfigResourceCondition type for use
+// with apply.
+type ConfigResourceConditionApplyConfiguration struct {
+	Type *monitoringv1.ConditionType `json:"type,omitempty"`
+	Status *monitoringv1.ConditionStatus `json:"status,omitempty"`
+	LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+	Reason *string `json:"reason,omitempty"`
+	Message *string `json:"message,omitempty"`
+	ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+}
+
+// ConfigResourceConditionApplyConfiguration constructs a declarative configuration of the ConfigResourceCondition type for use with
+// apply.
+func ConfigResourceCondition() *ConfigResourceConditionApplyConfiguration {
+	return &ConfigResourceConditionApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *ConfigResourceConditionApplyConfiguration) WithType(value monitoringv1.ConditionType) *ConfigResourceConditionApplyConfiguration {
+	b.Type = &value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ConfigResourceConditionApplyConfiguration) WithStatus(value monitoringv1.ConditionStatus) *ConfigResourceConditionApplyConfiguration {
+	b.Status = &value
+	return b
+}
+
+// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LastTransitionTime field is set to the value of the last call.
+func (b *ConfigResourceConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *ConfigResourceConditionApplyConfiguration {
+	b.LastTransitionTime = &value
+	return b
+}
+
+// WithReason sets the Reason field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Reason field is set to the value of the last call.
+func (b *ConfigResourceConditionApplyConfiguration) WithReason(value string) *ConfigResourceConditionApplyConfiguration {
+	b.Reason = &value
+	return b
+}
+
+// WithMessage sets the Message field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Message field is set to the value of the last call.
+func (b *ConfigResourceConditionApplyConfiguration) WithMessage(value string) *ConfigResourceConditionApplyConfiguration {
+	b.Message = &value
+	return b
+}
+
+// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ObservedGeneration field is set to the value of the last call.
+func (b *ConfigResourceConditionApplyConfiguration) WithObservedGeneration(value int64) *ConfigResourceConditionApplyConfiguration {
+	b.ObservedGeneration = &value
+	return b
+}
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/configresourcestatus.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/configresourcestatus.go
new file mode 100644
index 0000000000..cb9fe8bb12
--- /dev/null
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/configresourcestatus.go
@@ -0,0 +1,42 @@
+// Copyright The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ConfigResourceStatusApplyConfiguration represents a declarative configuration of the ConfigResourceStatus type for use
+// with apply.
+type ConfigResourceStatusApplyConfiguration struct {
+	Bindings []WorkloadBindingApplyConfiguration `json:"bindings,omitempty"`
+}
+
+// ConfigResourceStatusApplyConfiguration constructs a declarative configuration of the ConfigResourceStatus type for use with
+// apply.
+func ConfigResourceStatus() *ConfigResourceStatusApplyConfiguration {
+	return &ConfigResourceStatusApplyConfiguration{}
+}
+
+// WithBindings adds the given value to the Bindings field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Bindings field.
+func (b *ConfigResourceStatusApplyConfiguration) WithBindings(values ...*WorkloadBindingApplyConfiguration) *ConfigResourceStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithBindings")
+		}
+		b.Bindings = append(b.Bindings, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/corev1topologyspreadconstraint.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/corev1topologyspreadconstraint.go
new file mode 100644
index 0000000000..df84892529
--- /dev/null
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/corev1topologyspreadconstraint.go
@@ -0,0 +1,107 @@
+// Copyright The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// CoreV1TopologySpreadConstraintApplyConfiguration represents a declarative configuration of the CoreV1TopologySpreadConstraint type for use +// with apply. +type CoreV1TopologySpreadConstraintApplyConfiguration struct { + MaxSkew *int32 `json:"maxSkew,omitempty"` + TopologyKey *string `json:"topologyKey,omitempty"` + WhenUnsatisfiable *corev1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"` + LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` + MinDomains *int32 `json:"minDomains,omitempty"` + NodeAffinityPolicy *corev1.NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty"` + NodeTaintsPolicy *corev1.NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty"` + MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` +} + +// CoreV1TopologySpreadConstraintApplyConfiguration constructs a declarative configuration of the CoreV1TopologySpreadConstraint type for use with +// apply. +func CoreV1TopologySpreadConstraint() *CoreV1TopologySpreadConstraintApplyConfiguration { + return &CoreV1TopologySpreadConstraintApplyConfiguration{} +} + +// WithMaxSkew sets the MaxSkew field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxSkew field is set to the value of the last call. +func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithMaxSkew(value int32) *CoreV1TopologySpreadConstraintApplyConfiguration { + b.MaxSkew = &value + return b +} + +// WithTopologyKey sets the TopologyKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TopologyKey field is set to the value of the last call. +func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithTopologyKey(value string) *CoreV1TopologySpreadConstraintApplyConfiguration { + b.TopologyKey = &value + return b +} + +// WithWhenUnsatisfiable sets the WhenUnsatisfiable field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenUnsatisfiable field is set to the value of the last call. +func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithWhenUnsatisfiable(value corev1.UnsatisfiableConstraintAction) *CoreV1TopologySpreadConstraintApplyConfiguration { + b.WhenUnsatisfiable = &value + return b +} + +// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelSelector field is set to the value of the last call. 
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithLabelSelector(value *metav1.LabelSelectorApplyConfiguration) *CoreV1TopologySpreadConstraintApplyConfiguration { + b.LabelSelector = value + return b +} + +// WithMinDomains sets the MinDomains field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinDomains field is set to the value of the last call. +func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithMinDomains(value int32) *CoreV1TopologySpreadConstraintApplyConfiguration { + b.MinDomains = &value + return b +} + +// WithNodeAffinityPolicy sets the NodeAffinityPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeAffinityPolicy field is set to the value of the last call. +func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value corev1.NodeInclusionPolicy) *CoreV1TopologySpreadConstraintApplyConfiguration { + b.NodeAffinityPolicy = &value + return b +} + +// WithNodeTaintsPolicy sets the NodeTaintsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeTaintsPolicy field is set to the value of the last call. +func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value corev1.NodeInclusionPolicy) *CoreV1TopologySpreadConstraintApplyConfiguration { + b.NodeTaintsPolicy = &value + return b +} + +// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field. +func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithMatchLabelKeys(values ...string) *CoreV1TopologySpreadConstraintApplyConfiguration { + for i := range values { + b.MatchLabelKeys = append(b.MatchLabelKeys, values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/embeddedobjectmetadata.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/embeddedobjectmetadata.go new file mode 100644 index 0000000000..7258258d53 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/embeddedobjectmetadata.go @@ -0,0 +1,67 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// EmbeddedObjectMetadataApplyConfiguration represents a declarative configuration of the EmbeddedObjectMetadata type for use +// with apply. +type EmbeddedObjectMetadataApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// EmbeddedObjectMetadataApplyConfiguration constructs a declarative configuration of the EmbeddedObjectMetadata type for use with +// apply. +func EmbeddedObjectMetadata() *EmbeddedObjectMetadataApplyConfiguration { + return &EmbeddedObjectMetadataApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *EmbeddedObjectMetadataApplyConfiguration) WithName(value string) *EmbeddedObjectMetadataApplyConfiguration { + b.Name = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *EmbeddedObjectMetadataApplyConfiguration) WithLabels(entries map[string]string) *EmbeddedObjectMetadataApplyConfiguration { + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *EmbeddedObjectMetadataApplyConfiguration) WithAnnotations(entries map[string]string) *EmbeddedObjectMetadataApplyConfiguration { + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/embeddedpersistentvolumeclaim.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/embeddedpersistentvolumeclaim.go new file mode 100644 index 0000000000..f6d450c219 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/embeddedpersistentvolumeclaim.go @@ -0,0 +1,128 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// EmbeddedPersistentVolumeClaimApplyConfiguration represents a declarative configuration of the EmbeddedPersistentVolumeClaim type for use +// with apply. +type EmbeddedPersistentVolumeClaimApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *EmbeddedObjectMetadataApplyConfiguration `json:"metadata,omitempty"` + Spec *corev1.PersistentVolumeClaimSpec `json:"spec,omitempty"` + Status *corev1.PersistentVolumeClaimStatus `json:"status,omitempty"` +} + +// EmbeddedPersistentVolumeClaimApplyConfiguration constructs a declarative configuration of the EmbeddedPersistentVolumeClaim type for use with +// apply. +func EmbeddedPersistentVolumeClaim() *EmbeddedPersistentVolumeClaimApplyConfiguration { + b := &EmbeddedPersistentVolumeClaimApplyConfiguration{} + b.WithKind("EmbeddedPersistentVolumeClaim") + b.WithAPIVersion("monitoring.coreos.com/v1") + return b +} +func (b EmbeddedPersistentVolumeClaimApplyConfiguration) IsApplyConfiguration() {} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *EmbeddedPersistentVolumeClaimApplyConfiguration) WithKind(value string) *EmbeddedPersistentVolumeClaimApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *EmbeddedPersistentVolumeClaimApplyConfiguration) WithAPIVersion(value string) *EmbeddedPersistentVolumeClaimApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *EmbeddedPersistentVolumeClaimApplyConfiguration) WithName(value string) *EmbeddedPersistentVolumeClaimApplyConfiguration { + b.ensureEmbeddedObjectMetadataApplyConfigurationExists() + b.EmbeddedObjectMetadataApplyConfiguration.Name = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *EmbeddedPersistentVolumeClaimApplyConfiguration) WithLabels(entries map[string]string) *EmbeddedPersistentVolumeClaimApplyConfiguration { + b.ensureEmbeddedObjectMetadataApplyConfigurationExists() + if b.EmbeddedObjectMetadataApplyConfiguration.Labels == nil && len(entries) > 0 { + b.EmbeddedObjectMetadataApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.EmbeddedObjectMetadataApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *EmbeddedPersistentVolumeClaimApplyConfiguration) WithAnnotations(entries map[string]string) *EmbeddedPersistentVolumeClaimApplyConfiguration { + b.ensureEmbeddedObjectMetadataApplyConfigurationExists() + if b.EmbeddedObjectMetadataApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.EmbeddedObjectMetadataApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.EmbeddedObjectMetadataApplyConfiguration.Annotations[k] = v + } + return b +} + +func (b *EmbeddedPersistentVolumeClaimApplyConfiguration) ensureEmbeddedObjectMetadataApplyConfigurationExists() { + if b.EmbeddedObjectMetadataApplyConfiguration == nil { + b.EmbeddedObjectMetadataApplyConfiguration = &EmbeddedObjectMetadataApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *EmbeddedPersistentVolumeClaimApplyConfiguration) WithSpec(value corev1.PersistentVolumeClaimSpec) *EmbeddedPersistentVolumeClaimApplyConfiguration { + b.Spec = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *EmbeddedPersistentVolumeClaimApplyConfiguration) WithStatus(value corev1.PersistentVolumeClaimStatus) *EmbeddedPersistentVolumeClaimApplyConfiguration { + b.Status = &value + return b +} + +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *EmbeddedPersistentVolumeClaimApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. 
+func (b *EmbeddedPersistentVolumeClaimApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/endpoint.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/endpoint.go new file mode 100644 index 0000000000..0deb3e8d17 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/endpoint.go @@ -0,0 +1,271 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// EndpointApplyConfiguration represents a declarative configuration of the Endpoint type for use +// with apply. +type EndpointApplyConfiguration struct { + Port *string `json:"port,omitempty"` + TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` + Path *string `json:"path,omitempty"` + Scheme *monitoringv1.Scheme `json:"scheme,omitempty"` + Params map[string][]string `json:"params,omitempty"` + Interval *monitoringv1.Duration `json:"interval,omitempty"` + ScrapeTimeout *monitoringv1.Duration `json:"scrapeTimeout,omitempty"` + HonorLabels *bool `json:"honorLabels,omitempty"` + HonorTimestamps *bool `json:"honorTimestamps,omitempty"` + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` + MetricRelabelConfigs []RelabelConfigApplyConfiguration `json:"metricRelabelings,omitempty"` + RelabelConfigs []RelabelConfigApplyConfiguration `json:"relabelings,omitempty"` + FilterRunning *bool `json:"filterRunning,omitempty"` + BearerTokenFile *string `json:"bearerTokenFile,omitempty"` + HTTPConfigWithProxyAndTLSFilesApplyConfiguration `json:",inline"` +} + +// EndpointApplyConfiguration constructs a declarative configuration of the Endpoint type for use with +// apply. +func Endpoint() *EndpointApplyConfiguration { + return &EndpointApplyConfiguration{} +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithPort(value string) *EndpointApplyConfiguration { + b.Port = &value + return b +} + +// WithTargetPort sets the TargetPort field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetPort field is set to the value of the last call. 
+func (b *EndpointApplyConfiguration) WithTargetPort(value intstr.IntOrString) *EndpointApplyConfiguration { + b.TargetPort = &value + return b +} + +// WithPath sets the Path field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Path field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithPath(value string) *EndpointApplyConfiguration { + b.Path = &value + return b +} + +// WithScheme sets the Scheme field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scheme field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithScheme(value monitoringv1.Scheme) *EndpointApplyConfiguration { + b.Scheme = &value + return b +} + +// WithParams puts the entries into the Params field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Params field, +// overwriting an existing map entries in Params field with the same key. +func (b *EndpointApplyConfiguration) WithParams(entries map[string][]string) *EndpointApplyConfiguration { + if b.Params == nil && len(entries) > 0 { + b.Params = make(map[string][]string, len(entries)) + } + for k, v := range entries { + b.Params[k] = v + } + return b +} + +// WithInterval sets the Interval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Interval field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithInterval(value monitoringv1.Duration) *EndpointApplyConfiguration { + b.Interval = &value + return b +} + +// WithScrapeTimeout sets the ScrapeTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeTimeout field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithScrapeTimeout(value monitoringv1.Duration) *EndpointApplyConfiguration { + b.ScrapeTimeout = &value + return b +} + +// WithHonorLabels sets the HonorLabels field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HonorLabels field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithHonorLabels(value bool) *EndpointApplyConfiguration { + b.HonorLabels = &value + return b +} + +// WithHonorTimestamps sets the HonorTimestamps field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HonorTimestamps field is set to the value of the last call. 
+func (b *EndpointApplyConfiguration) WithHonorTimestamps(value bool) *EndpointApplyConfiguration { + b.HonorTimestamps = &value + return b +} + +// WithTrackTimestampsStaleness sets the TrackTimestampsStaleness field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TrackTimestampsStaleness field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithTrackTimestampsStaleness(value bool) *EndpointApplyConfiguration { + b.TrackTimestampsStaleness = &value + return b +} + +// WithMetricRelabelConfigs adds the given value to the MetricRelabelConfigs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MetricRelabelConfigs field. +func (b *EndpointApplyConfiguration) WithMetricRelabelConfigs(values ...*RelabelConfigApplyConfiguration) *EndpointApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMetricRelabelConfigs") + } + b.MetricRelabelConfigs = append(b.MetricRelabelConfigs, *values[i]) + } + return b +} + +// WithRelabelConfigs adds the given value to the RelabelConfigs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RelabelConfigs field. +func (b *EndpointApplyConfiguration) WithRelabelConfigs(values ...*RelabelConfigApplyConfiguration) *EndpointApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRelabelConfigs") + } + b.RelabelConfigs = append(b.RelabelConfigs, *values[i]) + } + return b +} + +// WithFilterRunning sets the FilterRunning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FilterRunning field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithFilterRunning(value bool) *EndpointApplyConfiguration { + b.FilterRunning = &value + return b +} + +// WithBearerTokenFile sets the BearerTokenFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerTokenFile field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithBearerTokenFile(value string) *EndpointApplyConfiguration { + b.BearerTokenFile = &value + return b +} + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithAuthorization(value *SafeAuthorizationApplyConfiguration) *EndpointApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.Authorization = value + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithBasicAuth(value *BasicAuthApplyConfiguration) *EndpointApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BasicAuth = value + return b +} + +// WithOAuth2 sets the OAuth2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OAuth2 field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithOAuth2(value *OAuth2ApplyConfiguration) *EndpointApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.OAuth2 = value + return b +} + +// WithBearerTokenSecret sets the BearerTokenSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerTokenSecret field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithBearerTokenSecret(value corev1.SecretKeySelector) *EndpointApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BearerTokenSecret = &value + return b +} + +// WithFollowRedirects sets the FollowRedirects field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FollowRedirects field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithFollowRedirects(value bool) *EndpointApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.FollowRedirects = &value + return b +} + +// WithEnableHTTP2 sets the EnableHTTP2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableHTTP2 field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithEnableHTTP2(value bool) *EndpointApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.EnableHTTP2 = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithTLSConfig(value *TLSConfigApplyConfiguration) *EndpointApplyConfiguration { + b.HTTPConfigWithTLSFilesApplyConfiguration.TLSConfig = value + return b +} + +// WithProxyURL sets the ProxyURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyURL field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithProxyURL(value string) *EndpointApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyURL = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. 
+func (b *EndpointApplyConfiguration) WithNoProxy(value string) *EndpointApplyConfiguration { + b.ProxyConfigApplyConfiguration.NoProxy = &value + return b +} + +// WithProxyFromEnvironment sets the ProxyFromEnvironment field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyFromEnvironment field is set to the value of the last call. +func (b *EndpointApplyConfiguration) WithProxyFromEnvironment(value bool) *EndpointApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyFromEnvironment = &value + return b +} + +// WithProxyConnectHeader puts the entries into the ProxyConnectHeader field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ProxyConnectHeader field, +// overwriting an existing map entries in ProxyConnectHeader field with the same key. +func (b *EndpointApplyConfiguration) WithProxyConnectHeader(entries map[string][]corev1.SecretKeySelector) *EndpointApplyConfiguration { + if b.ProxyConfigApplyConfiguration.ProxyConnectHeader == nil && len(entries) > 0 { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader = make(map[string][]corev1.SecretKeySelector, len(entries)) + } + for k, v := range entries { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader[k] = v + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/exemplars.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/exemplars.go new file mode 100644 index 0000000000..5fbd82890c --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/exemplars.go @@ -0,0 +1,37 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ExemplarsApplyConfiguration represents a declarative configuration of the Exemplars type for use +// with apply. +type ExemplarsApplyConfiguration struct { + MaxSize *int64 `json:"maxSize,omitempty"` +} + +// ExemplarsApplyConfiguration constructs a declarative configuration of the Exemplars type for use with +// apply. +func Exemplars() *ExemplarsApplyConfiguration { + return &ExemplarsApplyConfiguration{} +} + +// WithMaxSize sets the MaxSize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxSize field is set to the value of the last call. 
+func (b *ExemplarsApplyConfiguration) WithMaxSize(value int64) *ExemplarsApplyConfiguration { + b.MaxSize = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globaljiraconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globaljiraconfig.go new file mode 100644 index 0000000000..5d10fe6f51 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globaljiraconfig.go @@ -0,0 +1,41 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// GlobalJiraConfigApplyConfiguration represents a declarative configuration of the GlobalJiraConfig type for use +// with apply. +type GlobalJiraConfigApplyConfiguration struct { + APIURL *monitoringv1.URL `json:"apiURL,omitempty"` +} + +// GlobalJiraConfigApplyConfiguration constructs a declarative configuration of the GlobalJiraConfig type for use with +// apply. +func GlobalJiraConfig() *GlobalJiraConfigApplyConfiguration { + return &GlobalJiraConfigApplyConfiguration{} +} + +// WithAPIURL sets the APIURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIURL field is set to the value of the last call. +func (b *GlobalJiraConfigApplyConfiguration) WithAPIURL(value monitoringv1.URL) *GlobalJiraConfigApplyConfiguration { + b.APIURL = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalrocketchatconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalrocketchatconfig.go new file mode 100644 index 0000000000..ab64ed9b2a --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalrocketchatconfig.go @@ -0,0 +1,60 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" +) + +// GlobalRocketChatConfigApplyConfiguration represents a declarative configuration of the GlobalRocketChatConfig type for use +// with apply. +type GlobalRocketChatConfigApplyConfiguration struct { + APIURL *monitoringv1.URL `json:"apiURL,omitempty"` + Token *corev1.SecretKeySelector `json:"token,omitempty"` + TokenID *corev1.SecretKeySelector `json:"tokenID,omitempty"` +} + +// GlobalRocketChatConfigApplyConfiguration constructs a declarative configuration of the GlobalRocketChatConfig type for use with +// apply. +func GlobalRocketChatConfig() *GlobalRocketChatConfigApplyConfiguration { + return &GlobalRocketChatConfigApplyConfiguration{} +} + +// WithAPIURL sets the APIURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIURL field is set to the value of the last call. +func (b *GlobalRocketChatConfigApplyConfiguration) WithAPIURL(value monitoringv1.URL) *GlobalRocketChatConfigApplyConfiguration { + b.APIURL = &value + return b +} + +// WithToken sets the Token field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Token field is set to the value of the last call. +func (b *GlobalRocketChatConfigApplyConfiguration) WithToken(value corev1.SecretKeySelector) *GlobalRocketChatConfigApplyConfiguration { + b.Token = &value + return b +} + +// WithTokenID sets the TokenID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TokenID field is set to the value of the last call. +func (b *GlobalRocketChatConfigApplyConfiguration) WithTokenID(value corev1.SecretKeySelector) *GlobalRocketChatConfigApplyConfiguration { + b.TokenID = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalsmtpconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalsmtpconfig.go new file mode 100644 index 0000000000..5812d334bd --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalsmtpconfig.go @@ -0,0 +1,113 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// GlobalSMTPConfigApplyConfiguration represents a declarative configuration of the GlobalSMTPConfig type for use +// with apply. 
+type GlobalSMTPConfigApplyConfiguration struct { + From *string `json:"from,omitempty"` + SmartHost *HostPortApplyConfiguration `json:"smartHost,omitempty"` + Hello *string `json:"hello,omitempty"` + AuthUsername *string `json:"authUsername,omitempty"` + AuthPassword *corev1.SecretKeySelector `json:"authPassword,omitempty"` + AuthIdentity *string `json:"authIdentity,omitempty"` + AuthSecret *corev1.SecretKeySelector `json:"authSecret,omitempty"` + RequireTLS *bool `json:"requireTLS,omitempty"` + TLSConfig *SafeTLSConfigApplyConfiguration `json:"tlsConfig,omitempty"` +} + +// GlobalSMTPConfigApplyConfiguration constructs a declarative configuration of the GlobalSMTPConfig type for use with +// apply. +func GlobalSMTPConfig() *GlobalSMTPConfigApplyConfiguration { + return &GlobalSMTPConfigApplyConfiguration{} +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *GlobalSMTPConfigApplyConfiguration) WithFrom(value string) *GlobalSMTPConfigApplyConfiguration { + b.From = &value + return b +} + +// WithSmartHost sets the SmartHost field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SmartHost field is set to the value of the last call. +func (b *GlobalSMTPConfigApplyConfiguration) WithSmartHost(value *HostPortApplyConfiguration) *GlobalSMTPConfigApplyConfiguration { + b.SmartHost = value + return b +} + +// WithHello sets the Hello field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Hello field is set to the value of the last call. +func (b *GlobalSMTPConfigApplyConfiguration) WithHello(value string) *GlobalSMTPConfigApplyConfiguration { + b.Hello = &value + return b +} + +// WithAuthUsername sets the AuthUsername field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AuthUsername field is set to the value of the last call. +func (b *GlobalSMTPConfigApplyConfiguration) WithAuthUsername(value string) *GlobalSMTPConfigApplyConfiguration { + b.AuthUsername = &value + return b +} + +// WithAuthPassword sets the AuthPassword field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AuthPassword field is set to the value of the last call. +func (b *GlobalSMTPConfigApplyConfiguration) WithAuthPassword(value corev1.SecretKeySelector) *GlobalSMTPConfigApplyConfiguration { + b.AuthPassword = &value + return b +} + +// WithAuthIdentity sets the AuthIdentity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AuthIdentity field is set to the value of the last call. 
+func (b *GlobalSMTPConfigApplyConfiguration) WithAuthIdentity(value string) *GlobalSMTPConfigApplyConfiguration { + b.AuthIdentity = &value + return b +} + +// WithAuthSecret sets the AuthSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AuthSecret field is set to the value of the last call. +func (b *GlobalSMTPConfigApplyConfiguration) WithAuthSecret(value corev1.SecretKeySelector) *GlobalSMTPConfigApplyConfiguration { + b.AuthSecret = &value + return b +} + +// WithRequireTLS sets the RequireTLS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RequireTLS field is set to the value of the last call. +func (b *GlobalSMTPConfigApplyConfiguration) WithRequireTLS(value bool) *GlobalSMTPConfigApplyConfiguration { + b.RequireTLS = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *GlobalSMTPConfigApplyConfiguration) WithTLSConfig(value *SafeTLSConfigApplyConfiguration) *GlobalSMTPConfigApplyConfiguration { + b.TLSConfig = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globaltelegramconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globaltelegramconfig.go new file mode 100644 index 0000000000..ea7a8581ba --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globaltelegramconfig.go @@ -0,0 +1,41 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// GlobalTelegramConfigApplyConfiguration represents a declarative configuration of the GlobalTelegramConfig type for use +// with apply. +type GlobalTelegramConfigApplyConfiguration struct { + APIURL *monitoringv1.URL `json:"apiURL,omitempty"` +} + +// GlobalTelegramConfigApplyConfiguration constructs a declarative configuration of the GlobalTelegramConfig type for use with +// apply. +func GlobalTelegramConfig() *GlobalTelegramConfigApplyConfiguration { + return &GlobalTelegramConfigApplyConfiguration{} +} + +// WithAPIURL sets the APIURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the APIURL field is set to the value of the last call. +func (b *GlobalTelegramConfigApplyConfiguration) WithAPIURL(value monitoringv1.URL) *GlobalTelegramConfigApplyConfiguration { + b.APIURL = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalvictoropsconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalvictoropsconfig.go new file mode 100644 index 0000000000..544c221af5 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalvictoropsconfig.go @@ -0,0 +1,51 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" +) + +// GlobalVictorOpsConfigApplyConfiguration represents a declarative configuration of the GlobalVictorOpsConfig type for use +// with apply. +type GlobalVictorOpsConfigApplyConfiguration struct { + APIURL *monitoringv1.URL `json:"apiURL,omitempty"` + APIKey *corev1.SecretKeySelector `json:"apiKey,omitempty"` +} + +// GlobalVictorOpsConfigApplyConfiguration constructs a declarative configuration of the GlobalVictorOpsConfig type for use with +// apply. +func GlobalVictorOpsConfig() *GlobalVictorOpsConfigApplyConfiguration { + return &GlobalVictorOpsConfigApplyConfiguration{} +} + +// WithAPIURL sets the APIURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIURL field is set to the value of the last call. +func (b *GlobalVictorOpsConfigApplyConfiguration) WithAPIURL(value monitoringv1.URL) *GlobalVictorOpsConfigApplyConfiguration { + b.APIURL = &value + return b +} + +// WithAPIKey sets the APIKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIKey field is set to the value of the last call. 
+func (b *GlobalVictorOpsConfigApplyConfiguration) WithAPIKey(value corev1.SecretKeySelector) *GlobalVictorOpsConfigApplyConfiguration { + b.APIKey = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalwebexconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalwebexconfig.go new file mode 100644 index 0000000000..fb39e0e990 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalwebexconfig.go @@ -0,0 +1,41 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// GlobalWebexConfigApplyConfiguration represents a declarative configuration of the GlobalWebexConfig type for use +// with apply. +type GlobalWebexConfigApplyConfiguration struct { + APIURL *monitoringv1.URL `json:"apiURL,omitempty"` +} + +// GlobalWebexConfigApplyConfiguration constructs a declarative configuration of the GlobalWebexConfig type for use with +// apply. +func GlobalWebexConfig() *GlobalWebexConfigApplyConfiguration { + return &GlobalWebexConfigApplyConfiguration{} +} + +// WithAPIURL sets the APIURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIURL field is set to the value of the last call. +func (b *GlobalWebexConfigApplyConfiguration) WithAPIURL(value monitoringv1.URL) *GlobalWebexConfigApplyConfiguration { + b.APIURL = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalwechatconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalwechatconfig.go new file mode 100644 index 0000000000..fc247bc954 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/globalwechatconfig.go @@ -0,0 +1,60 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" +) + +// GlobalWeChatConfigApplyConfiguration represents a declarative configuration of the GlobalWeChatConfig type for use +// with apply. +type GlobalWeChatConfigApplyConfiguration struct { + APIURL *monitoringv1.URL `json:"apiURL,omitempty"` + APISecret *corev1.SecretKeySelector `json:"apiSecret,omitempty"` + APICorpID *string `json:"apiCorpID,omitempty"` +} + +// GlobalWeChatConfigApplyConfiguration constructs a declarative configuration of the GlobalWeChatConfig type for use with +// apply. +func GlobalWeChatConfig() *GlobalWeChatConfigApplyConfiguration { + return &GlobalWeChatConfigApplyConfiguration{} +} + +// WithAPIURL sets the APIURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIURL field is set to the value of the last call. +func (b *GlobalWeChatConfigApplyConfiguration) WithAPIURL(value monitoringv1.URL) *GlobalWeChatConfigApplyConfiguration { + b.APIURL = &value + return b +} + +// WithAPISecret sets the APISecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APISecret field is set to the value of the last call. +func (b *GlobalWeChatConfigApplyConfiguration) WithAPISecret(value corev1.SecretKeySelector) *GlobalWeChatConfigApplyConfiguration { + b.APISecret = &value + return b +} + +// WithAPICorpID sets the APICorpID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APICorpID field is set to the value of the last call. +func (b *GlobalWeChatConfigApplyConfiguration) WithAPICorpID(value string) *GlobalWeChatConfigApplyConfiguration { + b.APICorpID = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/hostalias.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/hostalias.go new file mode 100644 index 0000000000..023503a569 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/hostalias.go @@ -0,0 +1,48 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// HostAliasApplyConfiguration represents a declarative configuration of the HostAlias type for use +// with apply. 
+type HostAliasApplyConfiguration struct { + IP *string `json:"ip,omitempty"` + Hostnames []string `json:"hostnames,omitempty"` +} + +// HostAliasApplyConfiguration constructs a declarative configuration of the HostAlias type for use with +// apply. +func HostAlias() *HostAliasApplyConfiguration { + return &HostAliasApplyConfiguration{} +} + +// WithIP sets the IP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IP field is set to the value of the last call. +func (b *HostAliasApplyConfiguration) WithIP(value string) *HostAliasApplyConfiguration { + b.IP = &value + return b +} + +// WithHostnames adds the given value to the Hostnames field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Hostnames field. +func (b *HostAliasApplyConfiguration) WithHostnames(values ...string) *HostAliasApplyConfiguration { + for i := range values { + b.Hostnames = append(b.Hostnames, values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/hostport.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/hostport.go new file mode 100644 index 0000000000..ff4e143e0d --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/hostport.go @@ -0,0 +1,46 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// HostPortApplyConfiguration represents a declarative configuration of the HostPort type for use +// with apply. +type HostPortApplyConfiguration struct { + Host *string `json:"host,omitempty"` + Port *string `json:"port,omitempty"` +} + +// HostPortApplyConfiguration constructs a declarative configuration of the HostPort type for use with +// apply. +func HostPort() *HostPortApplyConfiguration { + return &HostPortApplyConfiguration{} +} + +// WithHost sets the Host field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Host field is set to the value of the last call. +func (b *HostPortApplyConfiguration) WithHost(value string) *HostPortApplyConfiguration { + b.Host = &value + return b +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. 
+func (b *HostPortApplyConfiguration) WithPort(value string) *HostPortApplyConfiguration { + b.Port = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfig.go new file mode 100644 index 0000000000..b2eddc0eca --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfig.go @@ -0,0 +1,90 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// HTTPConfigApplyConfiguration represents a declarative configuration of the HTTPConfig type for use +// with apply. +type HTTPConfigApplyConfiguration struct { + HTTPConfigWithoutTLSApplyConfiguration `json:",inline"` + TLSConfig *SafeTLSConfigApplyConfiguration `json:"tlsConfig,omitempty"` +} + +// HTTPConfigApplyConfiguration constructs a declarative configuration of the HTTPConfig type for use with +// apply. +func HTTPConfig() *HTTPConfigApplyConfiguration { + return &HTTPConfigApplyConfiguration{} +} + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *HTTPConfigApplyConfiguration) WithAuthorization(value *SafeAuthorizationApplyConfiguration) *HTTPConfigApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.Authorization = value + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *HTTPConfigApplyConfiguration) WithBasicAuth(value *BasicAuthApplyConfiguration) *HTTPConfigApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BasicAuth = value + return b +} + +// WithOAuth2 sets the OAuth2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OAuth2 field is set to the value of the last call. +func (b *HTTPConfigApplyConfiguration) WithOAuth2(value *OAuth2ApplyConfiguration) *HTTPConfigApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.OAuth2 = value + return b +} + +// WithBearerTokenSecret sets the BearerTokenSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the BearerTokenSecret field is set to the value of the last call. +func (b *HTTPConfigApplyConfiguration) WithBearerTokenSecret(value corev1.SecretKeySelector) *HTTPConfigApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BearerTokenSecret = &value + return b +} + +// WithFollowRedirects sets the FollowRedirects field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FollowRedirects field is set to the value of the last call. +func (b *HTTPConfigApplyConfiguration) WithFollowRedirects(value bool) *HTTPConfigApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.FollowRedirects = &value + return b +} + +// WithEnableHTTP2 sets the EnableHTTP2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableHTTP2 field is set to the value of the last call. +func (b *HTTPConfigApplyConfiguration) WithEnableHTTP2(value bool) *HTTPConfigApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.EnableHTTP2 = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *HTTPConfigApplyConfiguration) WithTLSConfig(value *SafeTLSConfigApplyConfiguration) *HTTPConfigApplyConfiguration { + b.TLSConfig = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfigwithouttls.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfigwithouttls.go new file mode 100644 index 0000000000..2c4e7835e5 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfigwithouttls.go @@ -0,0 +1,86 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// HTTPConfigWithoutTLSApplyConfiguration represents a declarative configuration of the HTTPConfigWithoutTLS type for use +// with apply. 
+type HTTPConfigWithoutTLSApplyConfiguration struct { + Authorization *SafeAuthorizationApplyConfiguration `json:"authorization,omitempty"` + BasicAuth *BasicAuthApplyConfiguration `json:"basicAuth,omitempty"` + OAuth2 *OAuth2ApplyConfiguration `json:"oauth2,omitempty"` + BearerTokenSecret *corev1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` + FollowRedirects *bool `json:"followRedirects,omitempty"` + EnableHTTP2 *bool `json:"enableHttp2,omitempty"` +} + +// HTTPConfigWithoutTLSApplyConfiguration constructs a declarative configuration of the HTTPConfigWithoutTLS type for use with +// apply. +func HTTPConfigWithoutTLS() *HTTPConfigWithoutTLSApplyConfiguration { + return &HTTPConfigWithoutTLSApplyConfiguration{} +} + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *HTTPConfigWithoutTLSApplyConfiguration) WithAuthorization(value *SafeAuthorizationApplyConfiguration) *HTTPConfigWithoutTLSApplyConfiguration { + b.Authorization = value + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *HTTPConfigWithoutTLSApplyConfiguration) WithBasicAuth(value *BasicAuthApplyConfiguration) *HTTPConfigWithoutTLSApplyConfiguration { + b.BasicAuth = value + return b +} + +// WithOAuth2 sets the OAuth2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OAuth2 field is set to the value of the last call. +func (b *HTTPConfigWithoutTLSApplyConfiguration) WithOAuth2(value *OAuth2ApplyConfiguration) *HTTPConfigWithoutTLSApplyConfiguration { + b.OAuth2 = value + return b +} + +// WithBearerTokenSecret sets the BearerTokenSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerTokenSecret field is set to the value of the last call. +func (b *HTTPConfigWithoutTLSApplyConfiguration) WithBearerTokenSecret(value corev1.SecretKeySelector) *HTTPConfigWithoutTLSApplyConfiguration { + b.BearerTokenSecret = &value + return b +} + +// WithFollowRedirects sets the FollowRedirects field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FollowRedirects field is set to the value of the last call. +func (b *HTTPConfigWithoutTLSApplyConfiguration) WithFollowRedirects(value bool) *HTTPConfigWithoutTLSApplyConfiguration { + b.FollowRedirects = &value + return b +} + +// WithEnableHTTP2 sets the EnableHTTP2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableHTTP2 field is set to the value of the last call. 
+func (b *HTTPConfigWithoutTLSApplyConfiguration) WithEnableHTTP2(value bool) *HTTPConfigWithoutTLSApplyConfiguration { + b.EnableHTTP2 = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfigwithproxy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfigwithproxy.go new file mode 100644 index 0000000000..9e4666d233 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfigwithproxy.go @@ -0,0 +1,128 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// HTTPConfigWithProxyApplyConfiguration represents a declarative configuration of the HTTPConfigWithProxy type for use +// with apply. +type HTTPConfigWithProxyApplyConfiguration struct { + HTTPConfigApplyConfiguration `json:",inline"` + ProxyConfigApplyConfiguration `json:",inline"` +} + +// HTTPConfigWithProxyApplyConfiguration constructs a declarative configuration of the HTTPConfigWithProxy type for use with +// apply. +func HTTPConfigWithProxy() *HTTPConfigWithProxyApplyConfiguration { + return &HTTPConfigWithProxyApplyConfiguration{} +} + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *HTTPConfigWithProxyApplyConfiguration) WithAuthorization(value *SafeAuthorizationApplyConfiguration) *HTTPConfigWithProxyApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.Authorization = value + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *HTTPConfigWithProxyApplyConfiguration) WithBasicAuth(value *BasicAuthApplyConfiguration) *HTTPConfigWithProxyApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BasicAuth = value + return b +} + +// WithOAuth2 sets the OAuth2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OAuth2 field is set to the value of the last call. 
+func (b *HTTPConfigWithProxyApplyConfiguration) WithOAuth2(value *OAuth2ApplyConfiguration) *HTTPConfigWithProxyApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.OAuth2 = value + return b +} + +// WithBearerTokenSecret sets the BearerTokenSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerTokenSecret field is set to the value of the last call. +func (b *HTTPConfigWithProxyApplyConfiguration) WithBearerTokenSecret(value corev1.SecretKeySelector) *HTTPConfigWithProxyApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BearerTokenSecret = &value + return b +} + +// WithFollowRedirects sets the FollowRedirects field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FollowRedirects field is set to the value of the last call. +func (b *HTTPConfigWithProxyApplyConfiguration) WithFollowRedirects(value bool) *HTTPConfigWithProxyApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.FollowRedirects = &value + return b +} + +// WithEnableHTTP2 sets the EnableHTTP2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableHTTP2 field is set to the value of the last call. +func (b *HTTPConfigWithProxyApplyConfiguration) WithEnableHTTP2(value bool) *HTTPConfigWithProxyApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.EnableHTTP2 = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *HTTPConfigWithProxyApplyConfiguration) WithTLSConfig(value *SafeTLSConfigApplyConfiguration) *HTTPConfigWithProxyApplyConfiguration { + b.HTTPConfigApplyConfiguration.TLSConfig = value + return b +} + +// WithProxyURL sets the ProxyURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyURL field is set to the value of the last call. +func (b *HTTPConfigWithProxyApplyConfiguration) WithProxyURL(value string) *HTTPConfigWithProxyApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyURL = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *HTTPConfigWithProxyApplyConfiguration) WithNoProxy(value string) *HTTPConfigWithProxyApplyConfiguration { + b.ProxyConfigApplyConfiguration.NoProxy = &value + return b +} + +// WithProxyFromEnvironment sets the ProxyFromEnvironment field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyFromEnvironment field is set to the value of the last call. 
+func (b *HTTPConfigWithProxyApplyConfiguration) WithProxyFromEnvironment(value bool) *HTTPConfigWithProxyApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyFromEnvironment = &value + return b +} + +// WithProxyConnectHeader puts the entries into the ProxyConnectHeader field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ProxyConnectHeader field, +// overwriting an existing map entries in ProxyConnectHeader field with the same key. +func (b *HTTPConfigWithProxyApplyConfiguration) WithProxyConnectHeader(entries map[string][]corev1.SecretKeySelector) *HTTPConfigWithProxyApplyConfiguration { + if b.ProxyConfigApplyConfiguration.ProxyConnectHeader == nil && len(entries) > 0 { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader = make(map[string][]corev1.SecretKeySelector, len(entries)) + } + for k, v := range entries { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader[k] = v + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfigwithproxyandtlsfiles.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfigwithproxyandtlsfiles.go new file mode 100644 index 0000000000..eae11b14c7 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfigwithproxyandtlsfiles.go @@ -0,0 +1,128 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// HTTPConfigWithProxyAndTLSFilesApplyConfiguration represents a declarative configuration of the HTTPConfigWithProxyAndTLSFiles type for use +// with apply. +type HTTPConfigWithProxyAndTLSFilesApplyConfiguration struct { + HTTPConfigWithTLSFilesApplyConfiguration `json:",inline"` + ProxyConfigApplyConfiguration `json:",inline"` +} + +// HTTPConfigWithProxyAndTLSFilesApplyConfiguration constructs a declarative configuration of the HTTPConfigWithProxyAndTLSFiles type for use with +// apply. +func HTTPConfigWithProxyAndTLSFiles() *HTTPConfigWithProxyAndTLSFilesApplyConfiguration { + return &HTTPConfigWithProxyAndTLSFilesApplyConfiguration{} +} + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. 
+func (b *HTTPConfigWithProxyAndTLSFilesApplyConfiguration) WithAuthorization(value *SafeAuthorizationApplyConfiguration) *HTTPConfigWithProxyAndTLSFilesApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.Authorization = value + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *HTTPConfigWithProxyAndTLSFilesApplyConfiguration) WithBasicAuth(value *BasicAuthApplyConfiguration) *HTTPConfigWithProxyAndTLSFilesApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BasicAuth = value + return b +} + +// WithOAuth2 sets the OAuth2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OAuth2 field is set to the value of the last call. +func (b *HTTPConfigWithProxyAndTLSFilesApplyConfiguration) WithOAuth2(value *OAuth2ApplyConfiguration) *HTTPConfigWithProxyAndTLSFilesApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.OAuth2 = value + return b +} + +// WithBearerTokenSecret sets the BearerTokenSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerTokenSecret field is set to the value of the last call. +func (b *HTTPConfigWithProxyAndTLSFilesApplyConfiguration) WithBearerTokenSecret(value corev1.SecretKeySelector) *HTTPConfigWithProxyAndTLSFilesApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BearerTokenSecret = &value + return b +} + +// WithFollowRedirects sets the FollowRedirects field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FollowRedirects field is set to the value of the last call. +func (b *HTTPConfigWithProxyAndTLSFilesApplyConfiguration) WithFollowRedirects(value bool) *HTTPConfigWithProxyAndTLSFilesApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.FollowRedirects = &value + return b +} + +// WithEnableHTTP2 sets the EnableHTTP2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableHTTP2 field is set to the value of the last call. +func (b *HTTPConfigWithProxyAndTLSFilesApplyConfiguration) WithEnableHTTP2(value bool) *HTTPConfigWithProxyAndTLSFilesApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.EnableHTTP2 = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. 
+func (b *HTTPConfigWithProxyAndTLSFilesApplyConfiguration) WithTLSConfig(value *TLSConfigApplyConfiguration) *HTTPConfigWithProxyAndTLSFilesApplyConfiguration { + b.HTTPConfigWithTLSFilesApplyConfiguration.TLSConfig = value + return b +} + +// WithProxyURL sets the ProxyURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyURL field is set to the value of the last call. +func (b *HTTPConfigWithProxyAndTLSFilesApplyConfiguration) WithProxyURL(value string) *HTTPConfigWithProxyAndTLSFilesApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyURL = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *HTTPConfigWithProxyAndTLSFilesApplyConfiguration) WithNoProxy(value string) *HTTPConfigWithProxyAndTLSFilesApplyConfiguration { + b.ProxyConfigApplyConfiguration.NoProxy = &value + return b +} + +// WithProxyFromEnvironment sets the ProxyFromEnvironment field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyFromEnvironment field is set to the value of the last call. +func (b *HTTPConfigWithProxyAndTLSFilesApplyConfiguration) WithProxyFromEnvironment(value bool) *HTTPConfigWithProxyAndTLSFilesApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyFromEnvironment = &value + return b +} + +// WithProxyConnectHeader puts the entries into the ProxyConnectHeader field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ProxyConnectHeader field, +// overwriting an existing map entries in ProxyConnectHeader field with the same key. +func (b *HTTPConfigWithProxyAndTLSFilesApplyConfiguration) WithProxyConnectHeader(entries map[string][]corev1.SecretKeySelector) *HTTPConfigWithProxyAndTLSFilesApplyConfiguration { + if b.ProxyConfigApplyConfiguration.ProxyConnectHeader == nil && len(entries) > 0 { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader = make(map[string][]corev1.SecretKeySelector, len(entries)) + } + for k, v := range entries { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader[k] = v + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfigwithtlsfiles.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfigwithtlsfiles.go new file mode 100644 index 0000000000..fba1aab365 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/httpconfigwithtlsfiles.go @@ -0,0 +1,90 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// HTTPConfigWithTLSFilesApplyConfiguration represents a declarative configuration of the HTTPConfigWithTLSFiles type for use +// with apply. +type HTTPConfigWithTLSFilesApplyConfiguration struct { + HTTPConfigWithoutTLSApplyConfiguration `json:",inline"` + TLSConfig *TLSConfigApplyConfiguration `json:"tlsConfig,omitempty"` +} + +// HTTPConfigWithTLSFilesApplyConfiguration constructs a declarative configuration of the HTTPConfigWithTLSFiles type for use with +// apply. +func HTTPConfigWithTLSFiles() *HTTPConfigWithTLSFilesApplyConfiguration { + return &HTTPConfigWithTLSFilesApplyConfiguration{} +} + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *HTTPConfigWithTLSFilesApplyConfiguration) WithAuthorization(value *SafeAuthorizationApplyConfiguration) *HTTPConfigWithTLSFilesApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.Authorization = value + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *HTTPConfigWithTLSFilesApplyConfiguration) WithBasicAuth(value *BasicAuthApplyConfiguration) *HTTPConfigWithTLSFilesApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BasicAuth = value + return b +} + +// WithOAuth2 sets the OAuth2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OAuth2 field is set to the value of the last call. +func (b *HTTPConfigWithTLSFilesApplyConfiguration) WithOAuth2(value *OAuth2ApplyConfiguration) *HTTPConfigWithTLSFilesApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.OAuth2 = value + return b +} + +// WithBearerTokenSecret sets the BearerTokenSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerTokenSecret field is set to the value of the last call. +func (b *HTTPConfigWithTLSFilesApplyConfiguration) WithBearerTokenSecret(value corev1.SecretKeySelector) *HTTPConfigWithTLSFilesApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BearerTokenSecret = &value + return b +} + +// WithFollowRedirects sets the FollowRedirects field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FollowRedirects field is set to the value of the last call. 
+func (b *HTTPConfigWithTLSFilesApplyConfiguration) WithFollowRedirects(value bool) *HTTPConfigWithTLSFilesApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.FollowRedirects = &value + return b +} + +// WithEnableHTTP2 sets the EnableHTTP2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableHTTP2 field is set to the value of the last call. +func (b *HTTPConfigWithTLSFilesApplyConfiguration) WithEnableHTTP2(value bool) *HTTPConfigWithTLSFilesApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.EnableHTTP2 = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *HTTPConfigWithTLSFilesApplyConfiguration) WithTLSConfig(value *TLSConfigApplyConfiguration) *HTTPConfigWithTLSFilesApplyConfiguration { + b.TLSConfig = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/managedidentity.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/managedidentity.go new file mode 100644 index 0000000000..d43d114dc1 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/managedidentity.go @@ -0,0 +1,37 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ManagedIdentityApplyConfiguration represents a declarative configuration of the ManagedIdentity type for use +// with apply. +type ManagedIdentityApplyConfiguration struct { + ClientID *string `json:"clientId,omitempty"` +} + +// ManagedIdentityApplyConfiguration constructs a declarative configuration of the ManagedIdentity type for use with +// apply. +func ManagedIdentity() *ManagedIdentityApplyConfiguration { + return &ManagedIdentityApplyConfiguration{} +} + +// WithClientID sets the ClientID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientID field is set to the value of the last call. 
+func (b *ManagedIdentityApplyConfiguration) WithClientID(value string) *ManagedIdentityApplyConfiguration { + b.ClientID = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/metadataconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/metadataconfig.go new file mode 100644 index 0000000000..ff4c22f428 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/metadataconfig.go @@ -0,0 +1,59 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// MetadataConfigApplyConfiguration represents a declarative configuration of the MetadataConfig type for use +// with apply. +type MetadataConfigApplyConfiguration struct { + Send *bool `json:"send,omitempty"` + SendInterval *monitoringv1.Duration `json:"sendInterval,omitempty"` + MaxSamplesPerSend *int32 `json:"maxSamplesPerSend,omitempty"` +} + +// MetadataConfigApplyConfiguration constructs a declarative configuration of the MetadataConfig type for use with +// apply. +func MetadataConfig() *MetadataConfigApplyConfiguration { + return &MetadataConfigApplyConfiguration{} +} + +// WithSend sets the Send field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Send field is set to the value of the last call. +func (b *MetadataConfigApplyConfiguration) WithSend(value bool) *MetadataConfigApplyConfiguration { + b.Send = &value + return b +} + +// WithSendInterval sets the SendInterval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SendInterval field is set to the value of the last call. +func (b *MetadataConfigApplyConfiguration) WithSendInterval(value monitoringv1.Duration) *MetadataConfigApplyConfiguration { + b.SendInterval = &value + return b +} + +// WithMaxSamplesPerSend sets the MaxSamplesPerSend field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxSamplesPerSend field is set to the value of the last call. 
+func (b *MetadataConfigApplyConfiguration) WithMaxSamplesPerSend(value int32) *MetadataConfigApplyConfiguration { + b.MaxSamplesPerSend = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/namespaceselector.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/namespaceselector.go new file mode 100644 index 0000000000..ac3845a875 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/namespaceselector.go @@ -0,0 +1,48 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NamespaceSelectorApplyConfiguration represents a declarative configuration of the NamespaceSelector type for use +// with apply. +type NamespaceSelectorApplyConfiguration struct { + Any *bool `json:"any,omitempty"` + MatchNames []string `json:"matchNames,omitempty"` +} + +// NamespaceSelectorApplyConfiguration constructs a declarative configuration of the NamespaceSelector type for use with +// apply. +func NamespaceSelector() *NamespaceSelectorApplyConfiguration { + return &NamespaceSelectorApplyConfiguration{} +} + +// WithAny sets the Any field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Any field is set to the value of the last call. +func (b *NamespaceSelectorApplyConfiguration) WithAny(value bool) *NamespaceSelectorApplyConfiguration { + b.Any = &value + return b +} + +// WithMatchNames adds the given value to the MatchNames field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchNames field. +func (b *NamespaceSelectorApplyConfiguration) WithMatchNames(values ...string) *NamespaceSelectorApplyConfiguration { + for i := range values { + b.MatchNames = append(b.MatchNames, values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/nativehistogramconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/nativehistogramconfig.go new file mode 100644 index 0000000000..a6831e8779 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/nativehistogramconfig.go @@ -0,0 +1,77 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// NativeHistogramConfigApplyConfiguration represents a declarative configuration of the NativeHistogramConfig type for use +// with apply. +type NativeHistogramConfigApplyConfiguration struct { + ScrapeNativeHistograms *bool `json:"scrapeNativeHistograms,omitempty"` + ScrapeClassicHistograms *bool `json:"scrapeClassicHistograms,omitempty"` + NativeHistogramBucketLimit *uint64 `json:"nativeHistogramBucketLimit,omitempty"` + NativeHistogramMinBucketFactor *resource.Quantity `json:"nativeHistogramMinBucketFactor,omitempty"` + ConvertClassicHistogramsToNHCB *bool `json:"convertClassicHistogramsToNHCB,omitempty"` +} + +// NativeHistogramConfigApplyConfiguration constructs a declarative configuration of the NativeHistogramConfig type for use with +// apply. +func NativeHistogramConfig() *NativeHistogramConfigApplyConfiguration { + return &NativeHistogramConfigApplyConfiguration{} +} + +// WithScrapeNativeHistograms sets the ScrapeNativeHistograms field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeNativeHistograms field is set to the value of the last call. +func (b *NativeHistogramConfigApplyConfiguration) WithScrapeNativeHistograms(value bool) *NativeHistogramConfigApplyConfiguration { + b.ScrapeNativeHistograms = &value + return b +} + +// WithScrapeClassicHistograms sets the ScrapeClassicHistograms field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeClassicHistograms field is set to the value of the last call. +func (b *NativeHistogramConfigApplyConfiguration) WithScrapeClassicHistograms(value bool) *NativeHistogramConfigApplyConfiguration { + b.ScrapeClassicHistograms = &value + return b +} + +// WithNativeHistogramBucketLimit sets the NativeHistogramBucketLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NativeHistogramBucketLimit field is set to the value of the last call. +func (b *NativeHistogramConfigApplyConfiguration) WithNativeHistogramBucketLimit(value uint64) *NativeHistogramConfigApplyConfiguration { + b.NativeHistogramBucketLimit = &value + return b +} + +// WithNativeHistogramMinBucketFactor sets the NativeHistogramMinBucketFactor field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NativeHistogramMinBucketFactor field is set to the value of the last call. 
+func (b *NativeHistogramConfigApplyConfiguration) WithNativeHistogramMinBucketFactor(value resource.Quantity) *NativeHistogramConfigApplyConfiguration { + b.NativeHistogramMinBucketFactor = &value + return b +} + +// WithConvertClassicHistogramsToNHCB sets the ConvertClassicHistogramsToNHCB field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConvertClassicHistogramsToNHCB field is set to the value of the last call. +func (b *NativeHistogramConfigApplyConfiguration) WithConvertClassicHistogramsToNHCB(value bool) *NativeHistogramConfigApplyConfiguration { + b.ConvertClassicHistogramsToNHCB = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/oauth2.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/oauth2.go new file mode 100644 index 0000000000..2e9ce1cdb2 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/oauth2.go @@ -0,0 +1,133 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// OAuth2ApplyConfiguration represents a declarative configuration of the OAuth2 type for use +// with apply. +type OAuth2ApplyConfiguration struct { + ClientID *SecretOrConfigMapApplyConfiguration `json:"clientId,omitempty"` + ClientSecret *corev1.SecretKeySelector `json:"clientSecret,omitempty"` + TokenURL *string `json:"tokenUrl,omitempty"` + Scopes []string `json:"scopes,omitempty"` + EndpointParams map[string]string `json:"endpointParams,omitempty"` + TLSConfig *SafeTLSConfigApplyConfiguration `json:"tlsConfig,omitempty"` + ProxyConfigApplyConfiguration `json:",inline"` +} + +// OAuth2ApplyConfiguration constructs a declarative configuration of the OAuth2 type for use with +// apply. +func OAuth2() *OAuth2ApplyConfiguration { + return &OAuth2ApplyConfiguration{} +} + +// WithClientID sets the ClientID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientID field is set to the value of the last call. +func (b *OAuth2ApplyConfiguration) WithClientID(value *SecretOrConfigMapApplyConfiguration) *OAuth2ApplyConfiguration { + b.ClientID = value + return b +} + +// WithClientSecret sets the ClientSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientSecret field is set to the value of the last call. 
+func (b *OAuth2ApplyConfiguration) WithClientSecret(value corev1.SecretKeySelector) *OAuth2ApplyConfiguration { + b.ClientSecret = &value + return b +} + +// WithTokenURL sets the TokenURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TokenURL field is set to the value of the last call. +func (b *OAuth2ApplyConfiguration) WithTokenURL(value string) *OAuth2ApplyConfiguration { + b.TokenURL = &value + return b +} + +// WithScopes adds the given value to the Scopes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Scopes field. +func (b *OAuth2ApplyConfiguration) WithScopes(values ...string) *OAuth2ApplyConfiguration { + for i := range values { + b.Scopes = append(b.Scopes, values[i]) + } + return b +} + +// WithEndpointParams puts the entries into the EndpointParams field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the EndpointParams field, +// overwriting an existing map entries in EndpointParams field with the same key. +func (b *OAuth2ApplyConfiguration) WithEndpointParams(entries map[string]string) *OAuth2ApplyConfiguration { + if b.EndpointParams == nil && len(entries) > 0 { + b.EndpointParams = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.EndpointParams[k] = v + } + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *OAuth2ApplyConfiguration) WithTLSConfig(value *SafeTLSConfigApplyConfiguration) *OAuth2ApplyConfiguration { + b.TLSConfig = value + return b +} + +// WithProxyURL sets the ProxyURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyURL field is set to the value of the last call. +func (b *OAuth2ApplyConfiguration) WithProxyURL(value string) *OAuth2ApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyURL = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *OAuth2ApplyConfiguration) WithNoProxy(value string) *OAuth2ApplyConfiguration { + b.ProxyConfigApplyConfiguration.NoProxy = &value + return b +} + +// WithProxyFromEnvironment sets the ProxyFromEnvironment field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyFromEnvironment field is set to the value of the last call. 
+func (b *OAuth2ApplyConfiguration) WithProxyFromEnvironment(value bool) *OAuth2ApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyFromEnvironment = &value + return b +} + +// WithProxyConnectHeader puts the entries into the ProxyConnectHeader field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ProxyConnectHeader field, +// overwriting an existing map entries in ProxyConnectHeader field with the same key. +func (b *OAuth2ApplyConfiguration) WithProxyConnectHeader(entries map[string][]corev1.SecretKeySelector) *OAuth2ApplyConfiguration { + if b.ProxyConfigApplyConfiguration.ProxyConnectHeader == nil && len(entries) > 0 { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader = make(map[string][]corev1.SecretKeySelector, len(entries)) + } + for k, v := range entries { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader[k] = v + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/objectreference.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/objectreference.go new file mode 100644 index 0000000000..5bdf7ca8bd --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/objectreference.go @@ -0,0 +1,64 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ObjectReferenceApplyConfiguration represents a declarative configuration of the ObjectReference type for use +// with apply. +type ObjectReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ObjectReferenceApplyConfiguration constructs a declarative configuration of the ObjectReference type for use with +// apply. +func ObjectReference() *ObjectReferenceApplyConfiguration { + return &ObjectReferenceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *ObjectReferenceApplyConfiguration) WithGroup(value string) *ObjectReferenceApplyConfiguration { + b.Group = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. 
+func (b *ObjectReferenceApplyConfiguration) WithResource(value string) *ObjectReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ObjectReferenceApplyConfiguration) WithNamespace(value string) *ObjectReferenceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ObjectReferenceApplyConfiguration) WithName(value string) *ObjectReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/otlpconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/otlpconfig.go new file mode 100644 index 0000000000..8e07ceb571 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/otlpconfig.go @@ -0,0 +1,99 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// OTLPConfigApplyConfiguration represents a declarative configuration of the OTLPConfig type for use +// with apply. +type OTLPConfigApplyConfiguration struct { + PromoteAllResourceAttributes *bool `json:"promoteAllResourceAttributes,omitempty"` + IgnoreResourceAttributes []string `json:"ignoreResourceAttributes,omitempty"` + PromoteResourceAttributes []string `json:"promoteResourceAttributes,omitempty"` + TranslationStrategy *monitoringv1.TranslationStrategyOption `json:"translationStrategy,omitempty"` + KeepIdentifyingResourceAttributes *bool `json:"keepIdentifyingResourceAttributes,omitempty"` + ConvertHistogramsToNHCB *bool `json:"convertHistogramsToNHCB,omitempty"` + PromoteScopeMetadata *bool `json:"promoteScopeMetadata,omitempty"` +} + +// OTLPConfigApplyConfiguration constructs a declarative configuration of the OTLPConfig type for use with +// apply. +func OTLPConfig() *OTLPConfigApplyConfiguration { + return &OTLPConfigApplyConfiguration{} +} + +// WithPromoteAllResourceAttributes sets the PromoteAllResourceAttributes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PromoteAllResourceAttributes field is set to the value of the last call. 
+func (b *OTLPConfigApplyConfiguration) WithPromoteAllResourceAttributes(value bool) *OTLPConfigApplyConfiguration { + b.PromoteAllResourceAttributes = &value + return b +} + +// WithIgnoreResourceAttributes adds the given value to the IgnoreResourceAttributes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the IgnoreResourceAttributes field. +func (b *OTLPConfigApplyConfiguration) WithIgnoreResourceAttributes(values ...string) *OTLPConfigApplyConfiguration { + for i := range values { + b.IgnoreResourceAttributes = append(b.IgnoreResourceAttributes, values[i]) + } + return b +} + +// WithPromoteResourceAttributes adds the given value to the PromoteResourceAttributes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PromoteResourceAttributes field. +func (b *OTLPConfigApplyConfiguration) WithPromoteResourceAttributes(values ...string) *OTLPConfigApplyConfiguration { + for i := range values { + b.PromoteResourceAttributes = append(b.PromoteResourceAttributes, values[i]) + } + return b +} + +// WithTranslationStrategy sets the TranslationStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TranslationStrategy field is set to the value of the last call. +func (b *OTLPConfigApplyConfiguration) WithTranslationStrategy(value monitoringv1.TranslationStrategyOption) *OTLPConfigApplyConfiguration { + b.TranslationStrategy = &value + return b +} + +// WithKeepIdentifyingResourceAttributes sets the KeepIdentifyingResourceAttributes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeepIdentifyingResourceAttributes field is set to the value of the last call. +func (b *OTLPConfigApplyConfiguration) WithKeepIdentifyingResourceAttributes(value bool) *OTLPConfigApplyConfiguration { + b.KeepIdentifyingResourceAttributes = &value + return b +} + +// WithConvertHistogramsToNHCB sets the ConvertHistogramsToNHCB field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConvertHistogramsToNHCB field is set to the value of the last call. +func (b *OTLPConfigApplyConfiguration) WithConvertHistogramsToNHCB(value bool) *OTLPConfigApplyConfiguration { + b.ConvertHistogramsToNHCB = &value + return b +} + +// WithPromoteScopeMetadata sets the PromoteScopeMetadata field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PromoteScopeMetadata field is set to the value of the last call. 
+func (b *OTLPConfigApplyConfiguration) WithPromoteScopeMetadata(value bool) *OTLPConfigApplyConfiguration { + b.PromoteScopeMetadata = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/poddnsconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/poddnsconfig.go new file mode 100644 index 0000000000..bc3d878f95 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/poddnsconfig.go @@ -0,0 +1,64 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PodDNSConfigApplyConfiguration represents a declarative configuration of the PodDNSConfig type for use +// with apply. +type PodDNSConfigApplyConfiguration struct { + Nameservers []string `json:"nameservers,omitempty"` + Searches []string `json:"searches,omitempty"` + Options []PodDNSConfigOptionApplyConfiguration `json:"options,omitempty"` +} + +// PodDNSConfigApplyConfiguration constructs a declarative configuration of the PodDNSConfig type for use with +// apply. +func PodDNSConfig() *PodDNSConfigApplyConfiguration { + return &PodDNSConfigApplyConfiguration{} +} + +// WithNameservers adds the given value to the Nameservers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Nameservers field. +func (b *PodDNSConfigApplyConfiguration) WithNameservers(values ...string) *PodDNSConfigApplyConfiguration { + for i := range values { + b.Nameservers = append(b.Nameservers, values[i]) + } + return b +} + +// WithSearches adds the given value to the Searches field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Searches field. +func (b *PodDNSConfigApplyConfiguration) WithSearches(values ...string) *PodDNSConfigApplyConfiguration { + for i := range values { + b.Searches = append(b.Searches, values[i]) + } + return b +} + +// WithOptions adds the given value to the Options field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Options field. 
+func (b *PodDNSConfigApplyConfiguration) WithOptions(values ...*PodDNSConfigOptionApplyConfiguration) *PodDNSConfigApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOptions") + } + b.Options = append(b.Options, *values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/poddnsconfigoption.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/poddnsconfigoption.go new file mode 100644 index 0000000000..ea47439543 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/poddnsconfigoption.go @@ -0,0 +1,46 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PodDNSConfigOptionApplyConfiguration represents a declarative configuration of the PodDNSConfigOption type for use +// with apply. +type PodDNSConfigOptionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// PodDNSConfigOptionApplyConfiguration constructs a declarative configuration of the PodDNSConfigOption type for use with +// apply. +func PodDNSConfigOption() *PodDNSConfigOptionApplyConfiguration { + return &PodDNSConfigOptionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PodDNSConfigOptionApplyConfiguration) WithName(value string) *PodDNSConfigOptionApplyConfiguration { + b.Name = &value + return b +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *PodDNSConfigOptionApplyConfiguration) WithValue(value string) *PodDNSConfigOptionApplyConfiguration { + b.Value = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/podmetricsendpoint.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/podmetricsendpoint.go new file mode 100644 index 0000000000..2d0a81428b --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/podmetricsendpoint.go @@ -0,0 +1,271 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// PodMetricsEndpointApplyConfiguration represents a declarative configuration of the PodMetricsEndpoint type for use +// with apply. +type PodMetricsEndpointApplyConfiguration struct { + Port *string `json:"port,omitempty"` + PortNumber *int32 `json:"portNumber,omitempty"` + TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` + Path *string `json:"path,omitempty"` + Scheme *monitoringv1.Scheme `json:"scheme,omitempty"` + Params map[string][]string `json:"params,omitempty"` + Interval *monitoringv1.Duration `json:"interval,omitempty"` + ScrapeTimeout *monitoringv1.Duration `json:"scrapeTimeout,omitempty"` + HonorLabels *bool `json:"honorLabels,omitempty"` + HonorTimestamps *bool `json:"honorTimestamps,omitempty"` + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` + MetricRelabelConfigs []RelabelConfigApplyConfiguration `json:"metricRelabelings,omitempty"` + RelabelConfigs []RelabelConfigApplyConfiguration `json:"relabelings,omitempty"` + FilterRunning *bool `json:"filterRunning,omitempty"` + HTTPConfigWithProxyApplyConfiguration `json:",inline"` +} + +// PodMetricsEndpointApplyConfiguration constructs a declarative configuration of the PodMetricsEndpoint type for use with +// apply. +func PodMetricsEndpoint() *PodMetricsEndpointApplyConfiguration { + return &PodMetricsEndpointApplyConfiguration{} +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithPort(value string) *PodMetricsEndpointApplyConfiguration { + b.Port = &value + return b +} + +// WithPortNumber sets the PortNumber field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PortNumber field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithPortNumber(value int32) *PodMetricsEndpointApplyConfiguration { + b.PortNumber = &value + return b +} + +// WithTargetPort sets the TargetPort field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetPort field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithTargetPort(value intstr.IntOrString) *PodMetricsEndpointApplyConfiguration { + b.TargetPort = &value + return b +} + +// WithPath sets the Path field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Path field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithPath(value string) *PodMetricsEndpointApplyConfiguration { + b.Path = &value + return b +} + +// WithScheme sets the Scheme field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scheme field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithScheme(value monitoringv1.Scheme) *PodMetricsEndpointApplyConfiguration { + b.Scheme = &value + return b +} + +// WithParams puts the entries into the Params field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Params field, +// overwriting an existing map entries in Params field with the same key. +func (b *PodMetricsEndpointApplyConfiguration) WithParams(entries map[string][]string) *PodMetricsEndpointApplyConfiguration { + if b.Params == nil && len(entries) > 0 { + b.Params = make(map[string][]string, len(entries)) + } + for k, v := range entries { + b.Params[k] = v + } + return b +} + +// WithInterval sets the Interval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Interval field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithInterval(value monitoringv1.Duration) *PodMetricsEndpointApplyConfiguration { + b.Interval = &value + return b +} + +// WithScrapeTimeout sets the ScrapeTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeTimeout field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithScrapeTimeout(value monitoringv1.Duration) *PodMetricsEndpointApplyConfiguration { + b.ScrapeTimeout = &value + return b +} + +// WithHonorLabels sets the HonorLabels field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HonorLabels field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithHonorLabels(value bool) *PodMetricsEndpointApplyConfiguration { + b.HonorLabels = &value + return b +} + +// WithHonorTimestamps sets the HonorTimestamps field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HonorTimestamps field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithHonorTimestamps(value bool) *PodMetricsEndpointApplyConfiguration { + b.HonorTimestamps = &value + return b +} + +// WithTrackTimestampsStaleness sets the TrackTimestampsStaleness field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TrackTimestampsStaleness field is set to the value of the last call. 
+func (b *PodMetricsEndpointApplyConfiguration) WithTrackTimestampsStaleness(value bool) *PodMetricsEndpointApplyConfiguration { + b.TrackTimestampsStaleness = &value + return b +} + +// WithMetricRelabelConfigs adds the given value to the MetricRelabelConfigs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MetricRelabelConfigs field. +func (b *PodMetricsEndpointApplyConfiguration) WithMetricRelabelConfigs(values ...*RelabelConfigApplyConfiguration) *PodMetricsEndpointApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMetricRelabelConfigs") + } + b.MetricRelabelConfigs = append(b.MetricRelabelConfigs, *values[i]) + } + return b +} + +// WithRelabelConfigs adds the given value to the RelabelConfigs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RelabelConfigs field. +func (b *PodMetricsEndpointApplyConfiguration) WithRelabelConfigs(values ...*RelabelConfigApplyConfiguration) *PodMetricsEndpointApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRelabelConfigs") + } + b.RelabelConfigs = append(b.RelabelConfigs, *values[i]) + } + return b +} + +// WithFilterRunning sets the FilterRunning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FilterRunning field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithFilterRunning(value bool) *PodMetricsEndpointApplyConfiguration { + b.FilterRunning = &value + return b +} + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithAuthorization(value *SafeAuthorizationApplyConfiguration) *PodMetricsEndpointApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.Authorization = value + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithBasicAuth(value *BasicAuthApplyConfiguration) *PodMetricsEndpointApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BasicAuth = value + return b +} + +// WithOAuth2 sets the OAuth2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OAuth2 field is set to the value of the last call. 
+func (b *PodMetricsEndpointApplyConfiguration) WithOAuth2(value *OAuth2ApplyConfiguration) *PodMetricsEndpointApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.OAuth2 = value + return b +} + +// WithBearerTokenSecret sets the BearerTokenSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerTokenSecret field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithBearerTokenSecret(value corev1.SecretKeySelector) *PodMetricsEndpointApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BearerTokenSecret = &value + return b +} + +// WithFollowRedirects sets the FollowRedirects field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FollowRedirects field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithFollowRedirects(value bool) *PodMetricsEndpointApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.FollowRedirects = &value + return b +} + +// WithEnableHTTP2 sets the EnableHTTP2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableHTTP2 field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithEnableHTTP2(value bool) *PodMetricsEndpointApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.EnableHTTP2 = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithTLSConfig(value *SafeTLSConfigApplyConfiguration) *PodMetricsEndpointApplyConfiguration { + b.HTTPConfigApplyConfiguration.TLSConfig = value + return b +} + +// WithProxyURL sets the ProxyURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyURL field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithProxyURL(value string) *PodMetricsEndpointApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyURL = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *PodMetricsEndpointApplyConfiguration) WithNoProxy(value string) *PodMetricsEndpointApplyConfiguration { + b.ProxyConfigApplyConfiguration.NoProxy = &value + return b +} + +// WithProxyFromEnvironment sets the ProxyFromEnvironment field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyFromEnvironment field is set to the value of the last call. 
+func (b *PodMetricsEndpointApplyConfiguration) WithProxyFromEnvironment(value bool) *PodMetricsEndpointApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyFromEnvironment = &value + return b +} + +// WithProxyConnectHeader puts the entries into the ProxyConnectHeader field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ProxyConnectHeader field, +// overwriting an existing map entries in ProxyConnectHeader field with the same key. +func (b *PodMetricsEndpointApplyConfiguration) WithProxyConnectHeader(entries map[string][]corev1.SecretKeySelector) *PodMetricsEndpointApplyConfiguration { + if b.ProxyConfigApplyConfiguration.ProxyConnectHeader == nil && len(entries) > 0 { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader = make(map[string][]corev1.SecretKeySelector, len(entries)) + } + for k, v := range entries { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader[k] = v + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/podmonitor.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/podmonitor.go new file mode 100644 index 0000000000..883c6a5008 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/podmonitor.go @@ -0,0 +1,240 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PodMonitorApplyConfiguration represents a declarative configuration of the PodMonitor type for use +// with apply. +type PodMonitorApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PodMonitorSpecApplyConfiguration `json:"spec,omitempty"` + Status *ConfigResourceStatusApplyConfiguration `json:"status,omitempty"` +} + +// PodMonitor constructs a declarative configuration of the PodMonitor type for use with +// apply. +func PodMonitor(name, namespace string) *PodMonitorApplyConfiguration { + b := &PodMonitorApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("PodMonitor") + b.WithAPIVersion("monitoring.coreos.com/v1") + return b +} +func (b PodMonitorApplyConfiguration) IsApplyConfiguration() {} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *PodMonitorApplyConfiguration) WithKind(value string) *PodMonitorApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *PodMonitorApplyConfiguration) WithAPIVersion(value string) *PodMonitorApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PodMonitorApplyConfiguration) WithName(value string) *PodMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *PodMonitorApplyConfiguration) WithGenerateName(value string) *PodMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *PodMonitorApplyConfiguration) WithNamespace(value string) *PodMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *PodMonitorApplyConfiguration) WithUID(value types.UID) *PodMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *PodMonitorApplyConfiguration) WithResourceVersion(value string) *PodMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *PodMonitorApplyConfiguration) WithGeneration(value int64) *PodMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *PodMonitorApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PodMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *PodMonitorApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PodMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *PodMonitorApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *PodMonitorApplyConfiguration) WithLabels(entries map[string]string) *PodMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *PodMonitorApplyConfiguration) WithAnnotations(entries map[string]string) *PodMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *PodMonitorApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PodMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *PodMonitorApplyConfiguration) WithFinalizers(values ...string) *PodMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *PodMonitorApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *PodMonitorApplyConfiguration) WithSpec(value *PodMonitorSpecApplyConfiguration) *PodMonitorApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PodMonitorApplyConfiguration) WithStatus(value *ConfigResourceStatusApplyConfiguration) *PodMonitorApplyConfiguration { + b.Status = value + return b +} + +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *PodMonitorApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *PodMonitorApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PodMonitorApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *PodMonitorApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/podmonitorspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/podmonitorspec.go new file mode 100644 index 0000000000..3daf46fc26 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/podmonitorspec.go @@ -0,0 +1,237 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PodMonitorSpecApplyConfiguration represents a declarative configuration of the PodMonitorSpec type for use +// with apply. +type PodMonitorSpecApplyConfiguration struct { + JobLabel *string `json:"jobLabel,omitempty"` + PodTargetLabels []string `json:"podTargetLabels,omitempty"` + PodMetricsEndpoints []PodMetricsEndpointApplyConfiguration `json:"podMetricsEndpoints,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + SelectorMechanism *monitoringv1.SelectorMechanism `json:"selectorMechanism,omitempty"` + NamespaceSelector *NamespaceSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + TargetLimit *uint64 `json:"targetLimit,omitempty"` + ScrapeProtocols []monitoringv1.ScrapeProtocol `json:"scrapeProtocols,omitempty"` + FallbackScrapeProtocol *monitoringv1.ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + LabelLimit *uint64 `json:"labelLimit,omitempty"` + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + NativeHistogramConfigApplyConfiguration `json:",inline"` + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + AttachMetadata *AttachMetadataApplyConfiguration `json:"attachMetadata,omitempty"` + ScrapeClassName *string `json:"scrapeClass,omitempty"` + BodySizeLimit *monitoringv1.ByteSize `json:"bodySizeLimit,omitempty"` +} + +// PodMonitorSpecApplyConfiguration constructs a declarative configuration of the PodMonitorSpec type for use with +// apply. 
+func PodMonitorSpec() *PodMonitorSpecApplyConfiguration { + return &PodMonitorSpecApplyConfiguration{} +} + +// WithJobLabel sets the JobLabel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the JobLabel field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithJobLabel(value string) *PodMonitorSpecApplyConfiguration { + b.JobLabel = &value + return b +} + +// WithPodTargetLabels adds the given value to the PodTargetLabels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PodTargetLabels field. +func (b *PodMonitorSpecApplyConfiguration) WithPodTargetLabels(values ...string) *PodMonitorSpecApplyConfiguration { + for i := range values { + b.PodTargetLabels = append(b.PodTargetLabels, values[i]) + } + return b +} + +// WithPodMetricsEndpoints adds the given value to the PodMetricsEndpoints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PodMetricsEndpoints field. +func (b *PodMonitorSpecApplyConfiguration) WithPodMetricsEndpoints(values ...*PodMetricsEndpointApplyConfiguration) *PodMonitorSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPodMetricsEndpoints") + } + b.PodMetricsEndpoints = append(b.PodMetricsEndpoints, *values[i]) + } + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *PodMonitorSpecApplyConfiguration { + b.Selector = value + return b +} + +// WithSelectorMechanism sets the SelectorMechanism field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelectorMechanism field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithSelectorMechanism(value monitoringv1.SelectorMechanism) *PodMonitorSpecApplyConfiguration { + b.SelectorMechanism = &value + return b +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithNamespaceSelector(value *NamespaceSelectorApplyConfiguration) *PodMonitorSpecApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithSampleLimit sets the SampleLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SampleLimit field is set to the value of the last call. 
+func (b *PodMonitorSpecApplyConfiguration) WithSampleLimit(value uint64) *PodMonitorSpecApplyConfiguration { + b.SampleLimit = &value + return b +} + +// WithTargetLimit sets the TargetLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetLimit field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithTargetLimit(value uint64) *PodMonitorSpecApplyConfiguration { + b.TargetLimit = &value + return b +} + +// WithScrapeProtocols adds the given value to the ScrapeProtocols field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ScrapeProtocols field. +func (b *PodMonitorSpecApplyConfiguration) WithScrapeProtocols(values ...monitoringv1.ScrapeProtocol) *PodMonitorSpecApplyConfiguration { + for i := range values { + b.ScrapeProtocols = append(b.ScrapeProtocols, values[i]) + } + return b +} + +// WithFallbackScrapeProtocol sets the FallbackScrapeProtocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FallbackScrapeProtocol field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithFallbackScrapeProtocol(value monitoringv1.ScrapeProtocol) *PodMonitorSpecApplyConfiguration { + b.FallbackScrapeProtocol = &value + return b +} + +// WithLabelLimit sets the LabelLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelLimit field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithLabelLimit(value uint64) *PodMonitorSpecApplyConfiguration { + b.LabelLimit = &value + return b +} + +// WithLabelNameLengthLimit sets the LabelNameLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelNameLengthLimit field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithLabelNameLengthLimit(value uint64) *PodMonitorSpecApplyConfiguration { + b.LabelNameLengthLimit = &value + return b +} + +// WithLabelValueLengthLimit sets the LabelValueLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelValueLengthLimit field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithLabelValueLengthLimit(value uint64) *PodMonitorSpecApplyConfiguration { + b.LabelValueLengthLimit = &value + return b +} + +// WithScrapeNativeHistograms sets the ScrapeNativeHistograms field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeNativeHistograms field is set to the value of the last call. 
+func (b *PodMonitorSpecApplyConfiguration) WithScrapeNativeHistograms(value bool) *PodMonitorSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.ScrapeNativeHistograms = &value + return b +} + +// WithScrapeClassicHistograms sets the ScrapeClassicHistograms field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeClassicHistograms field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithScrapeClassicHistograms(value bool) *PodMonitorSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.ScrapeClassicHistograms = &value + return b +} + +// WithNativeHistogramBucketLimit sets the NativeHistogramBucketLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NativeHistogramBucketLimit field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithNativeHistogramBucketLimit(value uint64) *PodMonitorSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.NativeHistogramBucketLimit = &value + return b +} + +// WithNativeHistogramMinBucketFactor sets the NativeHistogramMinBucketFactor field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NativeHistogramMinBucketFactor field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithNativeHistogramMinBucketFactor(value resource.Quantity) *PodMonitorSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.NativeHistogramMinBucketFactor = &value + return b +} + +// WithConvertClassicHistogramsToNHCB sets the ConvertClassicHistogramsToNHCB field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConvertClassicHistogramsToNHCB field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithConvertClassicHistogramsToNHCB(value bool) *PodMonitorSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.ConvertClassicHistogramsToNHCB = &value + return b +} + +// WithKeepDroppedTargets sets the KeepDroppedTargets field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeepDroppedTargets field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithKeepDroppedTargets(value uint64) *PodMonitorSpecApplyConfiguration { + b.KeepDroppedTargets = &value + return b +} + +// WithAttachMetadata sets the AttachMetadata field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AttachMetadata field is set to the value of the last call. 
+func (b *PodMonitorSpecApplyConfiguration) WithAttachMetadata(value *AttachMetadataApplyConfiguration) *PodMonitorSpecApplyConfiguration { + b.AttachMetadata = value + return b +} + +// WithScrapeClassName sets the ScrapeClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeClassName field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithScrapeClassName(value string) *PodMonitorSpecApplyConfiguration { + b.ScrapeClassName = &value + return b +} + +// WithBodySizeLimit sets the BodySizeLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BodySizeLimit field is set to the value of the last call. +func (b *PodMonitorSpecApplyConfiguration) WithBodySizeLimit(value monitoringv1.ByteSize) *PodMonitorSpecApplyConfiguration { + b.BodySizeLimit = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probe.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probe.go new file mode 100644 index 0000000000..4dac5bd42a --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probe.go @@ -0,0 +1,240 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ProbeApplyConfiguration represents a declarative configuration of the Probe type for use +// with apply. +type ProbeApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ProbeSpecApplyConfiguration `json:"spec,omitempty"` + Status *ConfigResourceStatusApplyConfiguration `json:"status,omitempty"` +} + +// Probe constructs a declarative configuration of the Probe type for use with +// apply. +func Probe(name, namespace string) *ProbeApplyConfiguration { + b := &ProbeApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("Probe") + b.WithAPIVersion("monitoring.coreos.com/v1") + return b +} +func (b ProbeApplyConfiguration) IsApplyConfiguration() {} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *ProbeApplyConfiguration) WithKind(value string) *ProbeApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ProbeApplyConfiguration) WithAPIVersion(value string) *ProbeApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ProbeApplyConfiguration) WithName(value string) *ProbeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ProbeApplyConfiguration) WithGenerateName(value string) *ProbeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ProbeApplyConfiguration) WithNamespace(value string) *ProbeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ProbeApplyConfiguration) WithUID(value types.UID) *ProbeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ProbeApplyConfiguration) WithResourceVersion(value string) *ProbeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ProbeApplyConfiguration) WithGeneration(value int64) *ProbeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ProbeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ProbeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ProbeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ProbeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ProbeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ProbeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ProbeApplyConfiguration) WithLabels(entries map[string]string) *ProbeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *ProbeApplyConfiguration) WithAnnotations(entries map[string]string) *ProbeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ProbeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ProbeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ProbeApplyConfiguration) WithFinalizers(values ...string) *ProbeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ProbeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ProbeApplyConfiguration) WithSpec(value *ProbeSpecApplyConfiguration) *ProbeApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ProbeApplyConfiguration) WithStatus(value *ConfigResourceStatusApplyConfiguration) *ProbeApplyConfiguration { + b.Status = value + return b +} + +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ProbeApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ProbeApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ProbeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. 
+func (b *ProbeApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probeparam.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probeparam.go new file mode 100644 index 0000000000..7622658c66 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probeparam.go @@ -0,0 +1,48 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ProbeParamApplyConfiguration represents a declarative configuration of the ProbeParam type for use +// with apply. +type ProbeParamApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Values []string `json:"values,omitempty"` +} + +// ProbeParamApplyConfiguration constructs a declarative configuration of the ProbeParam type for use with +// apply. +func ProbeParam() *ProbeParamApplyConfiguration { + return &ProbeParamApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ProbeParamApplyConfiguration) WithName(value string) *ProbeParamApplyConfiguration { + b.Name = &value + return b +} + +// WithValues adds the given value to the Values field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Values field. +func (b *ProbeParamApplyConfiguration) WithValues(values ...string) *ProbeParamApplyConfiguration { + for i := range values { + b.Values = append(b.Values, values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/proberspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/proberspec.go new file mode 100644 index 0000000000..9931822be9 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/proberspec.go @@ -0,0 +1,99 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" +) + +// ProberSpecApplyConfiguration represents a declarative configuration of the ProberSpec type for use +// with apply. +type ProberSpecApplyConfiguration struct { + URL *string `json:"url,omitempty"` + Scheme *monitoringv1.Scheme `json:"scheme,omitempty"` + Path *string `json:"path,omitempty"` + ProxyConfigApplyConfiguration `json:",inline"` +} + +// ProberSpecApplyConfiguration constructs a declarative configuration of the ProberSpec type for use with +// apply. +func ProberSpec() *ProberSpecApplyConfiguration { + return &ProberSpecApplyConfiguration{} +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. +func (b *ProberSpecApplyConfiguration) WithURL(value string) *ProberSpecApplyConfiguration { + b.URL = &value + return b +} + +// WithScheme sets the Scheme field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scheme field is set to the value of the last call. +func (b *ProberSpecApplyConfiguration) WithScheme(value monitoringv1.Scheme) *ProberSpecApplyConfiguration { + b.Scheme = &value + return b +} + +// WithPath sets the Path field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Path field is set to the value of the last call. +func (b *ProberSpecApplyConfiguration) WithPath(value string) *ProberSpecApplyConfiguration { + b.Path = &value + return b +} + +// WithProxyURL sets the ProxyURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyURL field is set to the value of the last call. +func (b *ProberSpecApplyConfiguration) WithProxyURL(value string) *ProberSpecApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyURL = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *ProberSpecApplyConfiguration) WithNoProxy(value string) *ProberSpecApplyConfiguration { + b.ProxyConfigApplyConfiguration.NoProxy = &value + return b +} + +// WithProxyFromEnvironment sets the ProxyFromEnvironment field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ProxyFromEnvironment field is set to the value of the last call. +func (b *ProberSpecApplyConfiguration) WithProxyFromEnvironment(value bool) *ProberSpecApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyFromEnvironment = &value + return b +} + +// WithProxyConnectHeader puts the entries into the ProxyConnectHeader field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ProxyConnectHeader field, +// overwriting an existing map entries in ProxyConnectHeader field with the same key. +func (b *ProberSpecApplyConfiguration) WithProxyConnectHeader(entries map[string][]corev1.SecretKeySelector) *ProberSpecApplyConfiguration { + if b.ProxyConfigApplyConfiguration.ProxyConnectHeader == nil && len(entries) > 0 { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader = make(map[string][]corev1.SecretKeySelector, len(entries)) + } + for k, v := range entries { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader[k] = v + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probespec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probespec.go new file mode 100644 index 0000000000..6be60f3a13 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probespec.go @@ -0,0 +1,298 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// ProbeSpecApplyConfiguration represents a declarative configuration of the ProbeSpec type for use +// with apply. 
+type ProbeSpecApplyConfiguration struct { + JobName *string `json:"jobName,omitempty"` + ProberSpec *ProberSpecApplyConfiguration `json:"prober,omitempty"` + Module *string `json:"module,omitempty"` + Targets *ProbeTargetsApplyConfiguration `json:"targets,omitempty"` + Interval *monitoringv1.Duration `json:"interval,omitempty"` + ScrapeTimeout *monitoringv1.Duration `json:"scrapeTimeout,omitempty"` + MetricRelabelConfigs []RelabelConfigApplyConfiguration `json:"metricRelabelings,omitempty"` + Authorization *SafeAuthorizationApplyConfiguration `json:"authorization,omitempty"` + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + TargetLimit *uint64 `json:"targetLimit,omitempty"` + ScrapeProtocols []monitoringv1.ScrapeProtocol `json:"scrapeProtocols,omitempty"` + FallbackScrapeProtocol *monitoringv1.ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + LabelLimit *uint64 `json:"labelLimit,omitempty"` + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + NativeHistogramConfigApplyConfiguration `json:",inline"` + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + ScrapeClassName *string `json:"scrapeClass,omitempty"` + Params []ProbeParamApplyConfiguration `json:"params,omitempty"` + HTTPConfigApplyConfiguration `json:",inline"` +} + +// ProbeSpecApplyConfiguration constructs a declarative configuration of the ProbeSpec type for use with +// apply. +func ProbeSpec() *ProbeSpecApplyConfiguration { + return &ProbeSpecApplyConfiguration{} +} + +// WithJobName sets the JobName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the JobName field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithJobName(value string) *ProbeSpecApplyConfiguration { + b.JobName = &value + return b +} + +// WithProberSpec sets the ProberSpec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProberSpec field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithProberSpec(value *ProberSpecApplyConfiguration) *ProbeSpecApplyConfiguration { + b.ProberSpec = value + return b +} + +// WithModule sets the Module field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Module field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithModule(value string) *ProbeSpecApplyConfiguration { + b.Module = &value + return b +} + +// WithTargets sets the Targets field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Targets field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithTargets(value *ProbeTargetsApplyConfiguration) *ProbeSpecApplyConfiguration { + b.Targets = value + return b +} + +// WithInterval sets the Interval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Interval field is set to the value of the last call. 
+func (b *ProbeSpecApplyConfiguration) WithInterval(value monitoringv1.Duration) *ProbeSpecApplyConfiguration { + b.Interval = &value + return b +} + +// WithScrapeTimeout sets the ScrapeTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeTimeout field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithScrapeTimeout(value monitoringv1.Duration) *ProbeSpecApplyConfiguration { + b.ScrapeTimeout = &value + return b +} + +// WithMetricRelabelConfigs adds the given value to the MetricRelabelConfigs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MetricRelabelConfigs field. +func (b *ProbeSpecApplyConfiguration) WithMetricRelabelConfigs(values ...*RelabelConfigApplyConfiguration) *ProbeSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMetricRelabelConfigs") + } + b.MetricRelabelConfigs = append(b.MetricRelabelConfigs, *values[i]) + } + return b +} + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithAuthorization(value *SafeAuthorizationApplyConfiguration) *ProbeSpecApplyConfiguration { + b.Authorization = value + return b +} + +// WithSampleLimit sets the SampleLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SampleLimit field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithSampleLimit(value uint64) *ProbeSpecApplyConfiguration { + b.SampleLimit = &value + return b +} + +// WithTargetLimit sets the TargetLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetLimit field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithTargetLimit(value uint64) *ProbeSpecApplyConfiguration { + b.TargetLimit = &value + return b +} + +// WithScrapeProtocols adds the given value to the ScrapeProtocols field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ScrapeProtocols field. +func (b *ProbeSpecApplyConfiguration) WithScrapeProtocols(values ...monitoringv1.ScrapeProtocol) *ProbeSpecApplyConfiguration { + for i := range values { + b.ScrapeProtocols = append(b.ScrapeProtocols, values[i]) + } + return b +} + +// WithFallbackScrapeProtocol sets the FallbackScrapeProtocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FallbackScrapeProtocol field is set to the value of the last call. 
+func (b *ProbeSpecApplyConfiguration) WithFallbackScrapeProtocol(value monitoringv1.ScrapeProtocol) *ProbeSpecApplyConfiguration { + b.FallbackScrapeProtocol = &value + return b +} + +// WithLabelLimit sets the LabelLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelLimit field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithLabelLimit(value uint64) *ProbeSpecApplyConfiguration { + b.LabelLimit = &value + return b +} + +// WithLabelNameLengthLimit sets the LabelNameLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelNameLengthLimit field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithLabelNameLengthLimit(value uint64) *ProbeSpecApplyConfiguration { + b.LabelNameLengthLimit = &value + return b +} + +// WithLabelValueLengthLimit sets the LabelValueLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelValueLengthLimit field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithLabelValueLengthLimit(value uint64) *ProbeSpecApplyConfiguration { + b.LabelValueLengthLimit = &value + return b +} + +// WithScrapeNativeHistograms sets the ScrapeNativeHistograms field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeNativeHistograms field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithScrapeNativeHistograms(value bool) *ProbeSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.ScrapeNativeHistograms = &value + return b +} + +// WithScrapeClassicHistograms sets the ScrapeClassicHistograms field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeClassicHistograms field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithScrapeClassicHistograms(value bool) *ProbeSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.ScrapeClassicHistograms = &value + return b +} + +// WithNativeHistogramBucketLimit sets the NativeHistogramBucketLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NativeHistogramBucketLimit field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithNativeHistogramBucketLimit(value uint64) *ProbeSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.NativeHistogramBucketLimit = &value + return b +} + +// WithNativeHistogramMinBucketFactor sets the NativeHistogramMinBucketFactor field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NativeHistogramMinBucketFactor field is set to the value of the last call. 
+func (b *ProbeSpecApplyConfiguration) WithNativeHistogramMinBucketFactor(value resource.Quantity) *ProbeSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.NativeHistogramMinBucketFactor = &value + return b +} + +// WithConvertClassicHistogramsToNHCB sets the ConvertClassicHistogramsToNHCB field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConvertClassicHistogramsToNHCB field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithConvertClassicHistogramsToNHCB(value bool) *ProbeSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.ConvertClassicHistogramsToNHCB = &value + return b +} + +// WithKeepDroppedTargets sets the KeepDroppedTargets field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeepDroppedTargets field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithKeepDroppedTargets(value uint64) *ProbeSpecApplyConfiguration { + b.KeepDroppedTargets = &value + return b +} + +// WithScrapeClassName sets the ScrapeClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeClassName field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithScrapeClassName(value string) *ProbeSpecApplyConfiguration { + b.ScrapeClassName = &value + return b +} + +// WithParams adds the given value to the Params field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Params field. +func (b *ProbeSpecApplyConfiguration) WithParams(values ...*ProbeParamApplyConfiguration) *ProbeSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithParams") + } + b.Params = append(b.Params, *values[i]) + } + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithBasicAuth(value *BasicAuthApplyConfiguration) *ProbeSpecApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BasicAuth = value + return b +} + +// WithOAuth2 sets the OAuth2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OAuth2 field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithOAuth2(value *OAuth2ApplyConfiguration) *ProbeSpecApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.OAuth2 = value + return b +} + +// WithBearerTokenSecret sets the BearerTokenSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerTokenSecret field is set to the value of the last call. 
+func (b *ProbeSpecApplyConfiguration) WithBearerTokenSecret(value corev1.SecretKeySelector) *ProbeSpecApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.BearerTokenSecret = &value + return b +} + +// WithFollowRedirects sets the FollowRedirects field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FollowRedirects field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithFollowRedirects(value bool) *ProbeSpecApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.FollowRedirects = &value + return b +} + +// WithEnableHTTP2 sets the EnableHTTP2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableHTTP2 field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithEnableHTTP2(value bool) *ProbeSpecApplyConfiguration { + b.HTTPConfigWithoutTLSApplyConfiguration.EnableHTTP2 = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *ProbeSpecApplyConfiguration) WithTLSConfig(value *SafeTLSConfigApplyConfiguration) *ProbeSpecApplyConfiguration { + b.HTTPConfigApplyConfiguration.TLSConfig = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probetargetingress.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probetargetingress.go new file mode 100644 index 0000000000..7576018db1 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probetargetingress.go @@ -0,0 +1,64 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ProbeTargetIngressApplyConfiguration represents a declarative configuration of the ProbeTargetIngress type for use +// with apply. +type ProbeTargetIngressApplyConfiguration struct { + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + NamespaceSelector *NamespaceSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + RelabelConfigs []RelabelConfigApplyConfiguration `json:"relabelingConfigs,omitempty"` +} + +// ProbeTargetIngressApplyConfiguration constructs a declarative configuration of the ProbeTargetIngress type for use with +// apply. 
+func ProbeTargetIngress() *ProbeTargetIngressApplyConfiguration { + return &ProbeTargetIngressApplyConfiguration{} +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. +func (b *ProbeTargetIngressApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *ProbeTargetIngressApplyConfiguration { + b.Selector = value + return b +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. +func (b *ProbeTargetIngressApplyConfiguration) WithNamespaceSelector(value *NamespaceSelectorApplyConfiguration) *ProbeTargetIngressApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithRelabelConfigs adds the given value to the RelabelConfigs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RelabelConfigs field. +func (b *ProbeTargetIngressApplyConfiguration) WithRelabelConfigs(values ...*RelabelConfigApplyConfiguration) *ProbeTargetIngressApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRelabelConfigs") + } + b.RelabelConfigs = append(b.RelabelConfigs, *values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probetargets.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probetargets.go new file mode 100644 index 0000000000..ffa5d07af4 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probetargets.go @@ -0,0 +1,46 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ProbeTargetsApplyConfiguration represents a declarative configuration of the ProbeTargets type for use +// with apply. +type ProbeTargetsApplyConfiguration struct { + StaticConfig *ProbeTargetStaticConfigApplyConfiguration `json:"staticConfig,omitempty"` + Ingress *ProbeTargetIngressApplyConfiguration `json:"ingress,omitempty"` +} + +// ProbeTargetsApplyConfiguration constructs a declarative configuration of the ProbeTargets type for use with +// apply. 
+func ProbeTargets() *ProbeTargetsApplyConfiguration { + return &ProbeTargetsApplyConfiguration{} +} + +// WithStaticConfig sets the StaticConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StaticConfig field is set to the value of the last call. +func (b *ProbeTargetsApplyConfiguration) WithStaticConfig(value *ProbeTargetStaticConfigApplyConfiguration) *ProbeTargetsApplyConfiguration { + b.StaticConfig = value + return b +} + +// WithIngress sets the Ingress field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ingress field is set to the value of the last call. +func (b *ProbeTargetsApplyConfiguration) WithIngress(value *ProbeTargetIngressApplyConfiguration) *ProbeTargetsApplyConfiguration { + b.Ingress = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probetargetstaticconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probetargetstaticconfig.go new file mode 100644 index 0000000000..8ee5f23a74 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/probetargetstaticconfig.go @@ -0,0 +1,68 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ProbeTargetStaticConfigApplyConfiguration represents a declarative configuration of the ProbeTargetStaticConfig type for use +// with apply. +type ProbeTargetStaticConfigApplyConfiguration struct { + Targets []string `json:"static,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + RelabelConfigs []RelabelConfigApplyConfiguration `json:"relabelingConfigs,omitempty"` +} + +// ProbeTargetStaticConfigApplyConfiguration constructs a declarative configuration of the ProbeTargetStaticConfig type for use with +// apply. +func ProbeTargetStaticConfig() *ProbeTargetStaticConfigApplyConfiguration { + return &ProbeTargetStaticConfigApplyConfiguration{} +} + +// WithTargets adds the given value to the Targets field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Targets field. 
+func (b *ProbeTargetStaticConfigApplyConfiguration) WithTargets(values ...string) *ProbeTargetStaticConfigApplyConfiguration { + for i := range values { + b.Targets = append(b.Targets, values[i]) + } + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ProbeTargetStaticConfigApplyConfiguration) WithLabels(entries map[string]string) *ProbeTargetStaticConfigApplyConfiguration { + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithRelabelConfigs adds the given value to the RelabelConfigs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RelabelConfigs field. +func (b *ProbeTargetStaticConfigApplyConfiguration) WithRelabelConfigs(values ...*RelabelConfigApplyConfiguration) *ProbeTargetStaticConfigApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRelabelConfigs") + } + b.RelabelConfigs = append(b.RelabelConfigs, *values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheus.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheus.go new file mode 100644 index 0000000000..345d3b1946 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheus.go @@ -0,0 +1,240 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PrometheusApplyConfiguration represents a declarative configuration of the Prometheus type for use +// with apply. +type PrometheusApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PrometheusSpecApplyConfiguration `json:"spec,omitempty"` + Status *PrometheusStatusApplyConfiguration `json:"status,omitempty"` +} + +// Prometheus constructs a declarative configuration of the Prometheus type for use with +// apply. 
+func Prometheus(name, namespace string) *PrometheusApplyConfiguration { + b := &PrometheusApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("Prometheus") + b.WithAPIVersion("monitoring.coreos.com/v1") + return b +} +func (b PrometheusApplyConfiguration) IsApplyConfiguration() {} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *PrometheusApplyConfiguration) WithKind(value string) *PrometheusApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *PrometheusApplyConfiguration) WithAPIVersion(value string) *PrometheusApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PrometheusApplyConfiguration) WithName(value string) *PrometheusApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *PrometheusApplyConfiguration) WithGenerateName(value string) *PrometheusApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *PrometheusApplyConfiguration) WithNamespace(value string) *PrometheusApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *PrometheusApplyConfiguration) WithUID(value types.UID) *PrometheusApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. 
+func (b *PrometheusApplyConfiguration) WithResourceVersion(value string) *PrometheusApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *PrometheusApplyConfiguration) WithGeneration(value int64) *PrometheusApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *PrometheusApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PrometheusApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *PrometheusApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PrometheusApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *PrometheusApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PrometheusApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *PrometheusApplyConfiguration) WithLabels(entries map[string]string) *PrometheusApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *PrometheusApplyConfiguration) WithAnnotations(entries map[string]string) *PrometheusApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *PrometheusApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PrometheusApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *PrometheusApplyConfiguration) WithFinalizers(values ...string) *PrometheusApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *PrometheusApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *PrometheusApplyConfiguration) WithSpec(value *PrometheusSpecApplyConfiguration) *PrometheusApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PrometheusApplyConfiguration) WithStatus(value *PrometheusStatusApplyConfiguration) *PrometheusApplyConfiguration { + b.Status = value + return b +} + +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *PrometheusApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *PrometheusApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PrometheusApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *PrometheusApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusrule.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusrule.go new file mode 100644 index 0000000000..74f601c6e8 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusrule.go @@ -0,0 +1,240 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PrometheusRuleApplyConfiguration represents a declarative configuration of the PrometheusRule type for use +// with apply. +type PrometheusRuleApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PrometheusRuleSpecApplyConfiguration `json:"spec,omitempty"` + Status *ConfigResourceStatusApplyConfiguration `json:"status,omitempty"` +} + +// PrometheusRule constructs a declarative configuration of the PrometheusRule type for use with +// apply. +func PrometheusRule(name, namespace string) *PrometheusRuleApplyConfiguration { + b := &PrometheusRuleApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("PrometheusRule") + b.WithAPIVersion("monitoring.coreos.com/v1") + return b +} +func (b PrometheusRuleApplyConfiguration) IsApplyConfiguration() {} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *PrometheusRuleApplyConfiguration) WithKind(value string) *PrometheusRuleApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *PrometheusRuleApplyConfiguration) WithAPIVersion(value string) *PrometheusRuleApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PrometheusRuleApplyConfiguration) WithName(value string) *PrometheusRuleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *PrometheusRuleApplyConfiguration) WithGenerateName(value string) *PrometheusRuleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *PrometheusRuleApplyConfiguration) WithNamespace(value string) *PrometheusRuleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *PrometheusRuleApplyConfiguration) WithUID(value types.UID) *PrometheusRuleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *PrometheusRuleApplyConfiguration) WithResourceVersion(value string) *PrometheusRuleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *PrometheusRuleApplyConfiguration) WithGeneration(value int64) *PrometheusRuleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *PrometheusRuleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PrometheusRuleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *PrometheusRuleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PrometheusRuleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *PrometheusRuleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PrometheusRuleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *PrometheusRuleApplyConfiguration) WithLabels(entries map[string]string) *PrometheusRuleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *PrometheusRuleApplyConfiguration) WithAnnotations(entries map[string]string) *PrometheusRuleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *PrometheusRuleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PrometheusRuleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *PrometheusRuleApplyConfiguration) WithFinalizers(values ...string) *PrometheusRuleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *PrometheusRuleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *PrometheusRuleApplyConfiguration) WithSpec(value *PrometheusRuleSpecApplyConfiguration) *PrometheusRuleApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PrometheusRuleApplyConfiguration) WithStatus(value *ConfigResourceStatusApplyConfiguration) *PrometheusRuleApplyConfiguration { + b.Status = value + return b +} + +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *PrometheusRuleApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *PrometheusRuleApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PrometheusRuleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. 
+func (b *PrometheusRuleApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusruleexcludeconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusruleexcludeconfig.go new file mode 100644 index 0000000000..c5e2240656 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusruleexcludeconfig.go @@ -0,0 +1,46 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PrometheusRuleExcludeConfigApplyConfiguration represents a declarative configuration of the PrometheusRuleExcludeConfig type for use +// with apply. +type PrometheusRuleExcludeConfigApplyConfiguration struct { + RuleNamespace *string `json:"ruleNamespace,omitempty"` + RuleName *string `json:"ruleName,omitempty"` +} + +// PrometheusRuleExcludeConfigApplyConfiguration constructs a declarative configuration of the PrometheusRuleExcludeConfig type for use with +// apply. +func PrometheusRuleExcludeConfig() *PrometheusRuleExcludeConfigApplyConfiguration { + return &PrometheusRuleExcludeConfigApplyConfiguration{} +} + +// WithRuleNamespace sets the RuleNamespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RuleNamespace field is set to the value of the last call. +func (b *PrometheusRuleExcludeConfigApplyConfiguration) WithRuleNamespace(value string) *PrometheusRuleExcludeConfigApplyConfiguration { + b.RuleNamespace = &value + return b +} + +// WithRuleName sets the RuleName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RuleName field is set to the value of the last call. 
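Again as an illustrative aside (not part of the diff): the constructor pattern just shown pre-populates TypeMeta and ObjectMeta, so a caller only chains the fields it wants to own under server-side apply. The sketch below uses only constructors visible in this diff; the name, namespace, and annotation key are hypothetical.

package main

import (
	"fmt"

	monitoringv1ac "github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1"
)

func main() {
	rule := monitoringv1ac.PrometheusRule("example-rules", "openshift-cluster-version").
		WithAnnotations(map[string]string{"example.io/owner": "cluster-version-operator"}).
		WithSpec(monitoringv1ac.PrometheusRuleSpec()) // rule groups would be chained on via WithGroups

	// Kind, APIVersion, Name and Namespace were all set by the constructor.
	fmt.Println(*rule.GetKind(), *rule.GetAPIVersion(), *rule.GetName(), *rule.GetNamespace())
}

A typed clientset generated alongside these apply configurations would normally consume such a value through an Apply call; that call is omitted here because its exact signature sits outside this diff.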
+func (b *PrometheusRuleExcludeConfigApplyConfiguration) WithRuleName(value string) *PrometheusRuleExcludeConfigApplyConfiguration { + b.RuleName = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusrulespec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusrulespec.go new file mode 100644 index 0000000000..1994d5892a --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusrulespec.go @@ -0,0 +1,42 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PrometheusRuleSpecApplyConfiguration represents a declarative configuration of the PrometheusRuleSpec type for use +// with apply. +type PrometheusRuleSpecApplyConfiguration struct { + Groups []RuleGroupApplyConfiguration `json:"groups,omitempty"` +} + +// PrometheusRuleSpecApplyConfiguration constructs a declarative configuration of the PrometheusRuleSpec type for use with +// apply. +func PrometheusRuleSpec() *PrometheusRuleSpecApplyConfiguration { + return &PrometheusRuleSpecApplyConfiguration{} +} + +// WithGroups adds the given value to the Groups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Groups field. +func (b *PrometheusRuleSpecApplyConfiguration) WithGroups(values ...*RuleGroupApplyConfiguration) *PrometheusRuleSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGroups") + } + b.Groups = append(b.Groups, *values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusspec.go new file mode 100644 index 0000000000..9c6b6c6b2a --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusspec.go @@ -0,0 +1,1127 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. 
DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PrometheusSpecApplyConfiguration represents a declarative configuration of the PrometheusSpec type for use +// with apply. +type PrometheusSpecApplyConfiguration struct { + CommonPrometheusFieldsApplyConfiguration `json:",inline"` + BaseImage *string `json:"baseImage,omitempty"` + Tag *string `json:"tag,omitempty"` + SHA *string `json:"sha,omitempty"` + Retention *monitoringv1.Duration `json:"retention,omitempty"` + RetentionSize *monitoringv1.ByteSize `json:"retentionSize,omitempty"` + ShardRetentionPolicy *ShardRetentionPolicyApplyConfiguration `json:"shardRetentionPolicy,omitempty"` + DisableCompaction *bool `json:"disableCompaction,omitempty"` + Rules *RulesApplyConfiguration `json:"rules,omitempty"` + PrometheusRulesExcludedFromEnforce []PrometheusRuleExcludeConfigApplyConfiguration `json:"prometheusRulesExcludedFromEnforce,omitempty"` + RuleSelector *metav1.LabelSelectorApplyConfiguration `json:"ruleSelector,omitempty"` + RuleNamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"ruleNamespaceSelector,omitempty"` + Query *QuerySpecApplyConfiguration `json:"query,omitempty"` + Alerting *AlertingSpecApplyConfiguration `json:"alerting,omitempty"` + AdditionalAlertRelabelConfigs *corev1.SecretKeySelector `json:"additionalAlertRelabelConfigs,omitempty"` + AdditionalAlertManagerConfigs *corev1.SecretKeySelector `json:"additionalAlertManagerConfigs,omitempty"` + RemoteRead []RemoteReadSpecApplyConfiguration `json:"remoteRead,omitempty"` + Thanos *ThanosSpecApplyConfiguration `json:"thanos,omitempty"` + QueryLogFile *string `json:"queryLogFile,omitempty"` + AllowOverlappingBlocks *bool `json:"allowOverlappingBlocks,omitempty"` + Exemplars *ExemplarsApplyConfiguration `json:"exemplars,omitempty"` + EvaluationInterval *monitoringv1.Duration `json:"evaluationInterval,omitempty"` + RuleQueryOffset *monitoringv1.Duration `json:"ruleQueryOffset,omitempty"` + EnableAdminAPI *bool `json:"enableAdminAPI,omitempty"` +} + +// PrometheusSpecApplyConfiguration constructs a declarative configuration of the PrometheusSpec type for use with +// apply. +func PrometheusSpec() *PrometheusSpecApplyConfiguration { + return &PrometheusSpecApplyConfiguration{} +} + +// WithPodMetadata sets the PodMetadata field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodMetadata field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithPodMetadata(value *EmbeddedObjectMetadataApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.PodMetadata = value + return b +} + +// WithServiceMonitorSelector sets the ServiceMonitorSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceMonitorSelector field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithServiceMonitorSelector(value *metav1.LabelSelectorApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ServiceMonitorSelector = value + return b +} + +// WithServiceMonitorNamespaceSelector sets the ServiceMonitorNamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceMonitorNamespaceSelector field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithServiceMonitorNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ServiceMonitorNamespaceSelector = value + return b +} + +// WithPodMonitorSelector sets the PodMonitorSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodMonitorSelector field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithPodMonitorSelector(value *metav1.LabelSelectorApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.PodMonitorSelector = value + return b +} + +// WithPodMonitorNamespaceSelector sets the PodMonitorNamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodMonitorNamespaceSelector field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithPodMonitorNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.PodMonitorNamespaceSelector = value + return b +} + +// WithProbeSelector sets the ProbeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProbeSelector field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithProbeSelector(value *metav1.LabelSelectorApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ProbeSelector = value + return b +} + +// WithProbeNamespaceSelector sets the ProbeNamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProbeNamespaceSelector field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithProbeNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ProbeNamespaceSelector = value + return b +} + +// WithScrapeConfigSelector sets the ScrapeConfigSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeConfigSelector field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithScrapeConfigSelector(value *metav1.LabelSelectorApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ScrapeConfigSelector = value + return b +} + +// WithScrapeConfigNamespaceSelector sets the ScrapeConfigNamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeConfigNamespaceSelector field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithScrapeConfigNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ScrapeConfigNamespaceSelector = value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithVersion(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.Version = &value + return b +} + +// WithPaused sets the Paused field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Paused field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithPaused(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.Paused = &value + return b +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithImage(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.Image = &value + return b +} + +// WithImagePullPolicy sets the ImagePullPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImagePullPolicy field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithImagePullPolicy(value corev1.PullPolicy) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ImagePullPolicy = &value + return b +} + +// WithImagePullSecrets adds the given value to the ImagePullSecrets field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ImagePullSecrets field. 
+func (b *PrometheusSpecApplyConfiguration) WithImagePullSecrets(values ...corev1.LocalObjectReference) *PrometheusSpecApplyConfiguration { + for i := range values { + b.CommonPrometheusFieldsApplyConfiguration.ImagePullSecrets = append(b.CommonPrometheusFieldsApplyConfiguration.ImagePullSecrets, values[i]) + } + return b +} + +// WithReplicas sets the Replicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Replicas field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithReplicas(value int32) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.Replicas = &value + return b +} + +// WithShards sets the Shards field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Shards field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithShards(value int32) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.Shards = &value + return b +} + +// WithReplicaExternalLabelName sets the ReplicaExternalLabelName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReplicaExternalLabelName field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithReplicaExternalLabelName(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ReplicaExternalLabelName = &value + return b +} + +// WithPrometheusExternalLabelName sets the PrometheusExternalLabelName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PrometheusExternalLabelName field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithPrometheusExternalLabelName(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.PrometheusExternalLabelName = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithLogLevel(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.LogLevel = &value + return b +} + +// WithLogFormat sets the LogFormat field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogFormat field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithLogFormat(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.LogFormat = &value + return b +} + +// WithScrapeInterval sets the ScrapeInterval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ScrapeInterval field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithScrapeInterval(value monitoringv1.Duration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ScrapeInterval = &value + return b +} + +// WithScrapeTimeout sets the ScrapeTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeTimeout field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithScrapeTimeout(value monitoringv1.Duration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ScrapeTimeout = &value + return b +} + +// WithScrapeProtocols adds the given value to the ScrapeProtocols field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ScrapeProtocols field. +func (b *PrometheusSpecApplyConfiguration) WithScrapeProtocols(values ...monitoringv1.ScrapeProtocol) *PrometheusSpecApplyConfiguration { + for i := range values { + b.CommonPrometheusFieldsApplyConfiguration.ScrapeProtocols = append(b.CommonPrometheusFieldsApplyConfiguration.ScrapeProtocols, values[i]) + } + return b +} + +// WithExternalLabels puts the entries into the ExternalLabels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ExternalLabels field, +// overwriting an existing map entries in ExternalLabels field with the same key. +func (b *PrometheusSpecApplyConfiguration) WithExternalLabels(entries map[string]string) *PrometheusSpecApplyConfiguration { + if b.CommonPrometheusFieldsApplyConfiguration.ExternalLabels == nil && len(entries) > 0 { + b.CommonPrometheusFieldsApplyConfiguration.ExternalLabels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.CommonPrometheusFieldsApplyConfiguration.ExternalLabels[k] = v + } + return b +} + +// WithEnableRemoteWriteReceiver sets the EnableRemoteWriteReceiver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableRemoteWriteReceiver field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithEnableRemoteWriteReceiver(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.EnableRemoteWriteReceiver = &value + return b +} + +// WithEnableOTLPReceiver sets the EnableOTLPReceiver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableOTLPReceiver field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithEnableOTLPReceiver(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.EnableOTLPReceiver = &value + return b +} + +// WithRemoteWriteReceiverMessageVersions adds the given value to the RemoteWriteReceiverMessageVersions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RemoteWriteReceiverMessageVersions field. +func (b *PrometheusSpecApplyConfiguration) WithRemoteWriteReceiverMessageVersions(values ...monitoringv1.RemoteWriteMessageVersion) *PrometheusSpecApplyConfiguration { + for i := range values { + b.CommonPrometheusFieldsApplyConfiguration.RemoteWriteReceiverMessageVersions = append(b.CommonPrometheusFieldsApplyConfiguration.RemoteWriteReceiverMessageVersions, values[i]) + } + return b +} + +// WithEnableFeatures adds the given value to the EnableFeatures field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the EnableFeatures field. +func (b *PrometheusSpecApplyConfiguration) WithEnableFeatures(values ...monitoringv1.EnableFeature) *PrometheusSpecApplyConfiguration { + for i := range values { + b.CommonPrometheusFieldsApplyConfiguration.EnableFeatures = append(b.CommonPrometheusFieldsApplyConfiguration.EnableFeatures, values[i]) + } + return b +} + +// WithExternalURL sets the ExternalURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExternalURL field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithExternalURL(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ExternalURL = &value + return b +} + +// WithRoutePrefix sets the RoutePrefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RoutePrefix field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithRoutePrefix(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.RoutePrefix = &value + return b +} + +// WithStorage sets the Storage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Storage field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithStorage(value *StorageSpecApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.Storage = value + return b +} + +// WithVolumes adds the given value to the Volumes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Volumes field. 
+func (b *PrometheusSpecApplyConfiguration) WithVolumes(values ...corev1.Volume) *PrometheusSpecApplyConfiguration { + for i := range values { + b.CommonPrometheusFieldsApplyConfiguration.Volumes = append(b.CommonPrometheusFieldsApplyConfiguration.Volumes, values[i]) + } + return b +} + +// WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the VolumeMounts field. +func (b *PrometheusSpecApplyConfiguration) WithVolumeMounts(values ...corev1.VolumeMount) *PrometheusSpecApplyConfiguration { + for i := range values { + b.CommonPrometheusFieldsApplyConfiguration.VolumeMounts = append(b.CommonPrometheusFieldsApplyConfiguration.VolumeMounts, values[i]) + } + return b +} + +// WithPersistentVolumeClaimRetentionPolicy sets the PersistentVolumeClaimRetentionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PersistentVolumeClaimRetentionPolicy field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPolicy(value appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.PersistentVolumeClaimRetentionPolicy = &value + return b +} + +// WithWeb sets the Web field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Web field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithWeb(value *PrometheusWebSpecApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.Web = value + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithResources(value corev1.ResourceRequirements) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.Resources = &value + return b +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. +func (b *PrometheusSpecApplyConfiguration) WithNodeSelector(entries map[string]string) *PrometheusSpecApplyConfiguration { + if b.CommonPrometheusFieldsApplyConfiguration.NodeSelector == nil && len(entries) > 0 { + b.CommonPrometheusFieldsApplyConfiguration.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.CommonPrometheusFieldsApplyConfiguration.NodeSelector[k] = v + } + return b +} + +// WithServiceAccountName sets the ServiceAccountName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ServiceAccountName field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithServiceAccountName(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ServiceAccountName = &value + return b +} + +// WithAutomountServiceAccountToken sets the AutomountServiceAccountToken field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AutomountServiceAccountToken field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithAutomountServiceAccountToken(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.AutomountServiceAccountToken = &value + return b +} + +// WithSecrets adds the given value to the Secrets field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Secrets field. +func (b *PrometheusSpecApplyConfiguration) WithSecrets(values ...string) *PrometheusSpecApplyConfiguration { + for i := range values { + b.CommonPrometheusFieldsApplyConfiguration.Secrets = append(b.CommonPrometheusFieldsApplyConfiguration.Secrets, values[i]) + } + return b +} + +// WithConfigMaps adds the given value to the ConfigMaps field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ConfigMaps field. +func (b *PrometheusSpecApplyConfiguration) WithConfigMaps(values ...string) *PrometheusSpecApplyConfiguration { + for i := range values { + b.CommonPrometheusFieldsApplyConfiguration.ConfigMaps = append(b.CommonPrometheusFieldsApplyConfiguration.ConfigMaps, values[i]) + } + return b +} + +// WithAffinity sets the Affinity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Affinity field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithAffinity(value corev1.Affinity) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.Affinity = &value + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *PrometheusSpecApplyConfiguration) WithTolerations(values ...corev1.Toleration) *PrometheusSpecApplyConfiguration { + for i := range values { + b.CommonPrometheusFieldsApplyConfiguration.Tolerations = append(b.CommonPrometheusFieldsApplyConfiguration.Tolerations, values[i]) + } + return b +} + +// WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field. 
+func (b *PrometheusSpecApplyConfiguration) WithTopologySpreadConstraints(values ...*TopologySpreadConstraintApplyConfiguration) *PrometheusSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithTopologySpreadConstraints") + } + b.CommonPrometheusFieldsApplyConfiguration.TopologySpreadConstraints = append(b.CommonPrometheusFieldsApplyConfiguration.TopologySpreadConstraints, *values[i]) + } + return b +} + +// WithRemoteWrite adds the given value to the RemoteWrite field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RemoteWrite field. +func (b *PrometheusSpecApplyConfiguration) WithRemoteWrite(values ...*RemoteWriteSpecApplyConfiguration) *PrometheusSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRemoteWrite") + } + b.CommonPrometheusFieldsApplyConfiguration.RemoteWrite = append(b.CommonPrometheusFieldsApplyConfiguration.RemoteWrite, *values[i]) + } + return b +} + +// WithOTLP sets the OTLP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OTLP field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithOTLP(value *OTLPConfigApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.OTLP = value + return b +} + +// WithSecurityContext sets the SecurityContext field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SecurityContext field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithSecurityContext(value corev1.PodSecurityContext) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.SecurityContext = &value + return b +} + +// WithDNSPolicy sets the DNSPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSPolicy field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithDNSPolicy(value monitoringv1.DNSPolicy) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.DNSPolicy = &value + return b +} + +// WithDNSConfig sets the DNSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSConfig field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithDNSConfig(value *PodDNSConfigApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.DNSConfig = value + return b +} + +// WithListenLocal sets the ListenLocal field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ListenLocal field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithListenLocal(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ListenLocal = &value + return b +} + +// WithPodManagementPolicy sets the PodManagementPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodManagementPolicy field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithPodManagementPolicy(value monitoringv1.PodManagementPolicyType) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.PodManagementPolicy = &value + return b +} + +// WithUpdateStrategy sets the UpdateStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UpdateStrategy field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithUpdateStrategy(value *StatefulSetUpdateStrategyApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.UpdateStrategy = value + return b +} + +// WithEnableServiceLinks sets the EnableServiceLinks field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableServiceLinks field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithEnableServiceLinks(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.EnableServiceLinks = &value + return b +} + +// WithContainers adds the given value to the Containers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Containers field. +func (b *PrometheusSpecApplyConfiguration) WithContainers(values ...corev1.Container) *PrometheusSpecApplyConfiguration { + for i := range values { + b.CommonPrometheusFieldsApplyConfiguration.Containers = append(b.CommonPrometheusFieldsApplyConfiguration.Containers, values[i]) + } + return b +} + +// WithInitContainers adds the given value to the InitContainers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the InitContainers field. +func (b *PrometheusSpecApplyConfiguration) WithInitContainers(values ...corev1.Container) *PrometheusSpecApplyConfiguration { + for i := range values { + b.CommonPrometheusFieldsApplyConfiguration.InitContainers = append(b.CommonPrometheusFieldsApplyConfiguration.InitContainers, values[i]) + } + return b +} + +// WithAdditionalScrapeConfigs sets the AdditionalScrapeConfigs field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdditionalScrapeConfigs field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithAdditionalScrapeConfigs(value corev1.SecretKeySelector) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.AdditionalScrapeConfigs = &value + return b +} + +// WithAPIServerConfig sets the APIServerConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIServerConfig field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithAPIServerConfig(value *APIServerConfigApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.APIServerConfig = value + return b +} + +// WithPriorityClassName sets the PriorityClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PriorityClassName field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithPriorityClassName(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.PriorityClassName = &value + return b +} + +// WithPortName sets the PortName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PortName field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithPortName(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.PortName = &value + return b +} + +// WithArbitraryFSAccessThroughSMs sets the ArbitraryFSAccessThroughSMs field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ArbitraryFSAccessThroughSMs field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithArbitraryFSAccessThroughSMs(value *ArbitraryFSAccessThroughSMsConfigApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ArbitraryFSAccessThroughSMs = value + return b +} + +// WithOverrideHonorLabels sets the OverrideHonorLabels field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OverrideHonorLabels field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithOverrideHonorLabels(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.OverrideHonorLabels = &value + return b +} + +// WithOverrideHonorTimestamps sets the OverrideHonorTimestamps field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OverrideHonorTimestamps field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithOverrideHonorTimestamps(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.OverrideHonorTimestamps = &value + return b +} + +// WithIgnoreNamespaceSelectors sets the IgnoreNamespaceSelectors field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IgnoreNamespaceSelectors field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithIgnoreNamespaceSelectors(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.IgnoreNamespaceSelectors = &value + return b +} + +// WithEnforcedNamespaceLabel sets the EnforcedNamespaceLabel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedNamespaceLabel field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithEnforcedNamespaceLabel(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.EnforcedNamespaceLabel = &value + return b +} + +// WithEnforcedSampleLimit sets the EnforcedSampleLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedSampleLimit field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithEnforcedSampleLimit(value uint64) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.EnforcedSampleLimit = &value + return b +} + +// WithEnforcedTargetLimit sets the EnforcedTargetLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedTargetLimit field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithEnforcedTargetLimit(value uint64) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.EnforcedTargetLimit = &value + return b +} + +// WithEnforcedLabelLimit sets the EnforcedLabelLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedLabelLimit field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithEnforcedLabelLimit(value uint64) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.EnforcedLabelLimit = &value + return b +} + +// WithEnforcedLabelNameLengthLimit sets the EnforcedLabelNameLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedLabelNameLengthLimit field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithEnforcedLabelNameLengthLimit(value uint64) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.EnforcedLabelNameLengthLimit = &value + return b +} + +// WithEnforcedLabelValueLengthLimit sets the EnforcedLabelValueLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedLabelValueLengthLimit field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithEnforcedLabelValueLengthLimit(value uint64) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.EnforcedLabelValueLengthLimit = &value + return b +} + +// WithEnforcedKeepDroppedTargets sets the EnforcedKeepDroppedTargets field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedKeepDroppedTargets field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithEnforcedKeepDroppedTargets(value uint64) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.EnforcedKeepDroppedTargets = &value + return b +} + +// WithEnforcedBodySizeLimit sets the EnforcedBodySizeLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedBodySizeLimit field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithEnforcedBodySizeLimit(value monitoringv1.ByteSize) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.EnforcedBodySizeLimit = &value + return b +} + +// WithNameValidationScheme sets the NameValidationScheme field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NameValidationScheme field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithNameValidationScheme(value monitoringv1.NameValidationSchemeOptions) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.NameValidationScheme = &value + return b +} + +// WithNameEscapingScheme sets the NameEscapingScheme field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NameEscapingScheme field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithNameEscapingScheme(value monitoringv1.NameEscapingSchemeOptions) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.NameEscapingScheme = &value + return b +} + +// WithConvertClassicHistogramsToNHCB sets the ConvertClassicHistogramsToNHCB field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConvertClassicHistogramsToNHCB field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithConvertClassicHistogramsToNHCB(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ConvertClassicHistogramsToNHCB = &value + return b +} + +// WithScrapeNativeHistograms sets the ScrapeNativeHistograms field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeNativeHistograms field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithScrapeNativeHistograms(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ScrapeNativeHistograms = &value + return b +} + +// WithScrapeClassicHistograms sets the ScrapeClassicHistograms field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeClassicHistograms field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithScrapeClassicHistograms(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ScrapeClassicHistograms = &value + return b +} + +// WithMinReadySeconds sets the MinReadySeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinReadySeconds field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithMinReadySeconds(value int32) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.MinReadySeconds = &value + return b +} + +// WithHostAliases adds the given value to the HostAliases field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the HostAliases field. +func (b *PrometheusSpecApplyConfiguration) WithHostAliases(values ...*HostAliasApplyConfiguration) *PrometheusSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithHostAliases") + } + b.CommonPrometheusFieldsApplyConfiguration.HostAliases = append(b.CommonPrometheusFieldsApplyConfiguration.HostAliases, *values[i]) + } + return b +} + +// WithAdditionalArgs adds the given value to the AdditionalArgs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AdditionalArgs field. +func (b *PrometheusSpecApplyConfiguration) WithAdditionalArgs(values ...*ArgumentApplyConfiguration) *PrometheusSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAdditionalArgs") + } + b.CommonPrometheusFieldsApplyConfiguration.AdditionalArgs = append(b.CommonPrometheusFieldsApplyConfiguration.AdditionalArgs, *values[i]) + } + return b +} + +// WithWALCompression sets the WALCompression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WALCompression field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithWALCompression(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.WALCompression = &value + return b +} + +// WithExcludedFromEnforcement adds the given value to the ExcludedFromEnforcement field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExcludedFromEnforcement field. +func (b *PrometheusSpecApplyConfiguration) WithExcludedFromEnforcement(values ...*ObjectReferenceApplyConfiguration) *PrometheusSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExcludedFromEnforcement") + } + b.CommonPrometheusFieldsApplyConfiguration.ExcludedFromEnforcement = append(b.CommonPrometheusFieldsApplyConfiguration.ExcludedFromEnforcement, *values[i]) + } + return b +} + +// WithHostNetwork sets the HostNetwork field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostNetwork field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithHostNetwork(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.HostNetwork = &value + return b +} + +// WithPodTargetLabels adds the given value to the PodTargetLabels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PodTargetLabels field. +func (b *PrometheusSpecApplyConfiguration) WithPodTargetLabels(values ...string) *PrometheusSpecApplyConfiguration { + for i := range values { + b.CommonPrometheusFieldsApplyConfiguration.PodTargetLabels = append(b.CommonPrometheusFieldsApplyConfiguration.PodTargetLabels, values[i]) + } + return b +} + +// WithTracingConfig sets the TracingConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TracingConfig field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithTracingConfig(value *TracingConfigApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.TracingConfig = value + return b +} + +// WithBodySizeLimit sets the BodySizeLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BodySizeLimit field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithBodySizeLimit(value monitoringv1.ByteSize) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.BodySizeLimit = &value + return b +} + +// WithSampleLimit sets the SampleLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SampleLimit field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithSampleLimit(value uint64) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.SampleLimit = &value + return b +} + +// WithTargetLimit sets the TargetLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetLimit field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithTargetLimit(value uint64) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.TargetLimit = &value + return b +} + +// WithLabelLimit sets the LabelLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelLimit field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithLabelLimit(value uint64) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.LabelLimit = &value + return b +} + +// WithLabelNameLengthLimit sets the LabelNameLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelNameLengthLimit field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithLabelNameLengthLimit(value uint64) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.LabelNameLengthLimit = &value + return b +} + +// WithLabelValueLengthLimit sets the LabelValueLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelValueLengthLimit field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithLabelValueLengthLimit(value uint64) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.LabelValueLengthLimit = &value + return b +} + +// WithKeepDroppedTargets sets the KeepDroppedTargets field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeepDroppedTargets field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithKeepDroppedTargets(value uint64) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.KeepDroppedTargets = &value + return b +} + +// WithReloadStrategy sets the ReloadStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReloadStrategy field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithReloadStrategy(value monitoringv1.ReloadStrategyType) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ReloadStrategy = &value + return b +} + +// WithMaximumStartupDurationSeconds sets the MaximumStartupDurationSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the MaximumStartupDurationSeconds field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithMaximumStartupDurationSeconds(value int32) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.MaximumStartupDurationSeconds = &value + return b +} + +// WithScrapeClasses adds the given value to the ScrapeClasses field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ScrapeClasses field. +func (b *PrometheusSpecApplyConfiguration) WithScrapeClasses(values ...*ScrapeClassApplyConfiguration) *PrometheusSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithScrapeClasses") + } + b.CommonPrometheusFieldsApplyConfiguration.ScrapeClasses = append(b.CommonPrometheusFieldsApplyConfiguration.ScrapeClasses, *values[i]) + } + return b +} + +// WithServiceDiscoveryRole sets the ServiceDiscoveryRole field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceDiscoveryRole field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithServiceDiscoveryRole(value monitoringv1.ServiceDiscoveryRole) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ServiceDiscoveryRole = &value + return b +} + +// WithTSDB sets the TSDB field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TSDB field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithTSDB(value *TSDBSpecApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.TSDB = value + return b +} + +// WithScrapeFailureLogFile sets the ScrapeFailureLogFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeFailureLogFile field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithScrapeFailureLogFile(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ScrapeFailureLogFile = &value + return b +} + +// WithServiceName sets the ServiceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceName field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithServiceName(value string) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.ServiceName = &value + return b +} + +// WithRuntime sets the Runtime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Runtime field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithRuntime(value *RuntimeConfigApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.Runtime = value + return b +} + +// WithTerminationGracePeriodSeconds sets the TerminationGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TerminationGracePeriodSeconds field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithTerminationGracePeriodSeconds(value int64) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.TerminationGracePeriodSeconds = &value + return b +} + +// WithHostUsers sets the HostUsers field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostUsers field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithHostUsers(value bool) *PrometheusSpecApplyConfiguration { + b.CommonPrometheusFieldsApplyConfiguration.HostUsers = &value + return b +} + +// WithBaseImage sets the BaseImage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BaseImage field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithBaseImage(value string) *PrometheusSpecApplyConfiguration { + b.BaseImage = &value + return b +} + +// WithTag sets the Tag field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Tag field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithTag(value string) *PrometheusSpecApplyConfiguration { + b.Tag = &value + return b +} + +// WithSHA sets the SHA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SHA field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithSHA(value string) *PrometheusSpecApplyConfiguration { + b.SHA = &value + return b +} + +// WithRetention sets the Retention field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Retention field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithRetention(value monitoringv1.Duration) *PrometheusSpecApplyConfiguration { + b.Retention = &value + return b +} + +// WithRetentionSize sets the RetentionSize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RetentionSize field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithRetentionSize(value monitoringv1.ByteSize) *PrometheusSpecApplyConfiguration { + b.RetentionSize = &value + return b +} + +// WithShardRetentionPolicy sets the ShardRetentionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ShardRetentionPolicy field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithShardRetentionPolicy(value *ShardRetentionPolicyApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.ShardRetentionPolicy = value + return b +} + +// WithDisableCompaction sets the DisableCompaction field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DisableCompaction field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithDisableCompaction(value bool) *PrometheusSpecApplyConfiguration { + b.DisableCompaction = &value + return b +} + +// WithRules sets the Rules field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Rules field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithRules(value *RulesApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.Rules = value + return b +} + +// WithPrometheusRulesExcludedFromEnforce adds the given value to the PrometheusRulesExcludedFromEnforce field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PrometheusRulesExcludedFromEnforce field. +func (b *PrometheusSpecApplyConfiguration) WithPrometheusRulesExcludedFromEnforce(values ...*PrometheusRuleExcludeConfigApplyConfiguration) *PrometheusSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPrometheusRulesExcludedFromEnforce") + } + b.PrometheusRulesExcludedFromEnforce = append(b.PrometheusRulesExcludedFromEnforce, *values[i]) + } + return b +} + +// WithRuleSelector sets the RuleSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RuleSelector field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithRuleSelector(value *metav1.LabelSelectorApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.RuleSelector = value + return b +} + +// WithRuleNamespaceSelector sets the RuleNamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RuleNamespaceSelector field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithRuleNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.RuleNamespaceSelector = value + return b +} + +// WithQuery sets the Query field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Query field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithQuery(value *QuerySpecApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.Query = value + return b +} + +// WithAlerting sets the Alerting field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Alerting field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithAlerting(value *AlertingSpecApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.Alerting = value + return b +} + +// WithAdditionalAlertRelabelConfigs sets the AdditionalAlertRelabelConfigs field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdditionalAlertRelabelConfigs field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithAdditionalAlertRelabelConfigs(value corev1.SecretKeySelector) *PrometheusSpecApplyConfiguration { + b.AdditionalAlertRelabelConfigs = &value + return b +} + +// WithAdditionalAlertManagerConfigs sets the AdditionalAlertManagerConfigs field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdditionalAlertManagerConfigs field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithAdditionalAlertManagerConfigs(value corev1.SecretKeySelector) *PrometheusSpecApplyConfiguration { + b.AdditionalAlertManagerConfigs = &value + return b +} + +// WithRemoteRead adds the given value to the RemoteRead field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RemoteRead field. +func (b *PrometheusSpecApplyConfiguration) WithRemoteRead(values ...*RemoteReadSpecApplyConfiguration) *PrometheusSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRemoteRead") + } + b.RemoteRead = append(b.RemoteRead, *values[i]) + } + return b +} + +// WithThanos sets the Thanos field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Thanos field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithThanos(value *ThanosSpecApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.Thanos = value + return b +} + +// WithQueryLogFile sets the QueryLogFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the QueryLogFile field is set to the value of the last call. 
+func (b *PrometheusSpecApplyConfiguration) WithQueryLogFile(value string) *PrometheusSpecApplyConfiguration { + b.QueryLogFile = &value + return b +} + +// WithAllowOverlappingBlocks sets the AllowOverlappingBlocks field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllowOverlappingBlocks field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithAllowOverlappingBlocks(value bool) *PrometheusSpecApplyConfiguration { + b.AllowOverlappingBlocks = &value + return b +} + +// WithExemplars sets the Exemplars field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Exemplars field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithExemplars(value *ExemplarsApplyConfiguration) *PrometheusSpecApplyConfiguration { + b.Exemplars = value + return b +} + +// WithEvaluationInterval sets the EvaluationInterval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EvaluationInterval field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithEvaluationInterval(value monitoringv1.Duration) *PrometheusSpecApplyConfiguration { + b.EvaluationInterval = &value + return b +} + +// WithRuleQueryOffset sets the RuleQueryOffset field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RuleQueryOffset field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithRuleQueryOffset(value monitoringv1.Duration) *PrometheusSpecApplyConfiguration { + b.RuleQueryOffset = &value + return b +} + +// WithEnableAdminAPI sets the EnableAdminAPI field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableAdminAPI field is set to the value of the last call. +func (b *PrometheusSpecApplyConfiguration) WithEnableAdminAPI(value bool) *PrometheusSpecApplyConfiguration { + b.EnableAdminAPI = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusstatus.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusstatus.go new file mode 100644 index 0000000000..53f0c59e8c --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheusstatus.go @@ -0,0 +1,119 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PrometheusStatusApplyConfiguration represents a declarative configuration of the PrometheusStatus type for use +// with apply. +type PrometheusStatusApplyConfiguration struct { + Paused *bool `json:"paused,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"` + AvailableReplicas *int32 `json:"availableReplicas,omitempty"` + UnavailableReplicas *int32 `json:"unavailableReplicas,omitempty"` + Conditions []ConditionApplyConfiguration `json:"conditions,omitempty"` + ShardStatuses []ShardStatusApplyConfiguration `json:"shardStatuses,omitempty"` + Shards *int32 `json:"shards,omitempty"` + Selector *string `json:"selector,omitempty"` +} + +// PrometheusStatusApplyConfiguration constructs a declarative configuration of the PrometheusStatus type for use with +// apply. +func PrometheusStatus() *PrometheusStatusApplyConfiguration { + return &PrometheusStatusApplyConfiguration{} +} + +// WithPaused sets the Paused field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Paused field is set to the value of the last call. +func (b *PrometheusStatusApplyConfiguration) WithPaused(value bool) *PrometheusStatusApplyConfiguration { + b.Paused = &value + return b +} + +// WithReplicas sets the Replicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Replicas field is set to the value of the last call. +func (b *PrometheusStatusApplyConfiguration) WithReplicas(value int32) *PrometheusStatusApplyConfiguration { + b.Replicas = &value + return b +} + +// WithUpdatedReplicas sets the UpdatedReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UpdatedReplicas field is set to the value of the last call. +func (b *PrometheusStatusApplyConfiguration) WithUpdatedReplicas(value int32) *PrometheusStatusApplyConfiguration { + b.UpdatedReplicas = &value + return b +} + +// WithAvailableReplicas sets the AvailableReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AvailableReplicas field is set to the value of the last call. +func (b *PrometheusStatusApplyConfiguration) WithAvailableReplicas(value int32) *PrometheusStatusApplyConfiguration { + b.AvailableReplicas = &value + return b +} + +// WithUnavailableReplicas sets the UnavailableReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnavailableReplicas field is set to the value of the last call. 
+func (b *PrometheusStatusApplyConfiguration) WithUnavailableReplicas(value int32) *PrometheusStatusApplyConfiguration { + b.UnavailableReplicas = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *PrometheusStatusApplyConfiguration) WithConditions(values ...*ConditionApplyConfiguration) *PrometheusStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithShardStatuses adds the given value to the ShardStatuses field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ShardStatuses field. +func (b *PrometheusStatusApplyConfiguration) WithShardStatuses(values ...*ShardStatusApplyConfiguration) *PrometheusStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithShardStatuses") + } + b.ShardStatuses = append(b.ShardStatuses, *values[i]) + } + return b +} + +// WithShards sets the Shards field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Shards field is set to the value of the last call. +func (b *PrometheusStatusApplyConfiguration) WithShards(value int32) *PrometheusStatusApplyConfiguration { + b.Shards = &value + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. +func (b *PrometheusStatusApplyConfiguration) WithSelector(value string) *PrometheusStatusApplyConfiguration { + b.Selector = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheuswebspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheuswebspec.go new file mode 100644 index 0000000000..645dc9cd5e --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/prometheuswebspec.go @@ -0,0 +1,63 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PrometheusWebSpecApplyConfiguration represents a declarative configuration of the PrometheusWebSpec type for use +// with apply. 
+type PrometheusWebSpecApplyConfiguration struct { + WebConfigFileFieldsApplyConfiguration `json:",inline"` + PageTitle *string `json:"pageTitle,omitempty"` + MaxConnections *int32 `json:"maxConnections,omitempty"` +} + +// PrometheusWebSpecApplyConfiguration constructs a declarative configuration of the PrometheusWebSpec type for use with +// apply. +func PrometheusWebSpec() *PrometheusWebSpecApplyConfiguration { + return &PrometheusWebSpecApplyConfiguration{} +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *PrometheusWebSpecApplyConfiguration) WithTLSConfig(value *WebTLSConfigApplyConfiguration) *PrometheusWebSpecApplyConfiguration { + b.WebConfigFileFieldsApplyConfiguration.TLSConfig = value + return b +} + +// WithHTTPConfig sets the HTTPConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPConfig field is set to the value of the last call. +func (b *PrometheusWebSpecApplyConfiguration) WithHTTPConfig(value *WebHTTPConfigApplyConfiguration) *PrometheusWebSpecApplyConfiguration { + b.WebConfigFileFieldsApplyConfiguration.HTTPConfig = value + return b +} + +// WithPageTitle sets the PageTitle field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PageTitle field is set to the value of the last call. +func (b *PrometheusWebSpecApplyConfiguration) WithPageTitle(value string) *PrometheusWebSpecApplyConfiguration { + b.PageTitle = &value + return b +} + +// WithMaxConnections sets the MaxConnections field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxConnections field is set to the value of the last call. +func (b *PrometheusWebSpecApplyConfiguration) WithMaxConnections(value int32) *PrometheusWebSpecApplyConfiguration { + b.MaxConnections = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/proxyconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/proxyconfig.go new file mode 100644 index 0000000000..4b62a9d601 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/proxyconfig.go @@ -0,0 +1,74 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// ProxyConfigApplyConfiguration represents a declarative configuration of the ProxyConfig type for use +// with apply. +type ProxyConfigApplyConfiguration struct { + ProxyURL *string `json:"proxyUrl,omitempty"` + NoProxy *string `json:"noProxy,omitempty"` + ProxyFromEnvironment *bool `json:"proxyFromEnvironment,omitempty"` + ProxyConnectHeader map[string][]corev1.SecretKeySelector `json:"proxyConnectHeader,omitempty"` +} + +// ProxyConfigApplyConfiguration constructs a declarative configuration of the ProxyConfig type for use with +// apply. +func ProxyConfig() *ProxyConfigApplyConfiguration { + return &ProxyConfigApplyConfiguration{} +} + +// WithProxyURL sets the ProxyURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyURL field is set to the value of the last call. +func (b *ProxyConfigApplyConfiguration) WithProxyURL(value string) *ProxyConfigApplyConfiguration { + b.ProxyURL = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *ProxyConfigApplyConfiguration) WithNoProxy(value string) *ProxyConfigApplyConfiguration { + b.NoProxy = &value + return b +} + +// WithProxyFromEnvironment sets the ProxyFromEnvironment field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyFromEnvironment field is set to the value of the last call. +func (b *ProxyConfigApplyConfiguration) WithProxyFromEnvironment(value bool) *ProxyConfigApplyConfiguration { + b.ProxyFromEnvironment = &value + return b +} + +// WithProxyConnectHeader puts the entries into the ProxyConnectHeader field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ProxyConnectHeader field, +// overwriting an existing map entries in ProxyConnectHeader field with the same key. +func (b *ProxyConfigApplyConfiguration) WithProxyConnectHeader(entries map[string][]corev1.SecretKeySelector) *ProxyConfigApplyConfiguration { + if b.ProxyConnectHeader == nil && len(entries) > 0 { + b.ProxyConnectHeader = make(map[string][]corev1.SecretKeySelector, len(entries)) + } + for k, v := range entries { + b.ProxyConnectHeader[k] = v + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/queryspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/queryspec.go new file mode 100644 index 0000000000..04b51025dd --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/queryspec.go @@ -0,0 +1,68 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// QuerySpecApplyConfiguration represents a declarative configuration of the QuerySpec type for use +// with apply. +type QuerySpecApplyConfiguration struct { + LookbackDelta *string `json:"lookbackDelta,omitempty"` + MaxConcurrency *int32 `json:"maxConcurrency,omitempty"` + MaxSamples *int32 `json:"maxSamples,omitempty"` + Timeout *monitoringv1.Duration `json:"timeout,omitempty"` +} + +// QuerySpecApplyConfiguration constructs a declarative configuration of the QuerySpec type for use with +// apply. +func QuerySpec() *QuerySpecApplyConfiguration { + return &QuerySpecApplyConfiguration{} +} + +// WithLookbackDelta sets the LookbackDelta field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LookbackDelta field is set to the value of the last call. +func (b *QuerySpecApplyConfiguration) WithLookbackDelta(value string) *QuerySpecApplyConfiguration { + b.LookbackDelta = &value + return b +} + +// WithMaxConcurrency sets the MaxConcurrency field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxConcurrency field is set to the value of the last call. +func (b *QuerySpecApplyConfiguration) WithMaxConcurrency(value int32) *QuerySpecApplyConfiguration { + b.MaxConcurrency = &value + return b +} + +// WithMaxSamples sets the MaxSamples field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxSamples field is set to the value of the last call. +func (b *QuerySpecApplyConfiguration) WithMaxSamples(value int32) *QuerySpecApplyConfiguration { + b.MaxSamples = &value + return b +} + +// WithTimeout sets the Timeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Timeout field is set to the value of the last call. 
+func (b *QuerySpecApplyConfiguration) WithTimeout(value monitoringv1.Duration) *QuerySpecApplyConfiguration { + b.Timeout = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/queueconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/queueconfig.go new file mode 100644 index 0000000000..d7dcd9dc98 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/queueconfig.go @@ -0,0 +1,122 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// QueueConfigApplyConfiguration represents a declarative configuration of the QueueConfig type for use +// with apply. +type QueueConfigApplyConfiguration struct { + Capacity *int `json:"capacity,omitempty"` + MinShards *int `json:"minShards,omitempty"` + MaxShards *int `json:"maxShards,omitempty"` + MaxSamplesPerSend *int `json:"maxSamplesPerSend,omitempty"` + BatchSendDeadline *monitoringv1.Duration `json:"batchSendDeadline,omitempty"` + MaxRetries *int `json:"maxRetries,omitempty"` + MinBackoff *monitoringv1.Duration `json:"minBackoff,omitempty"` + MaxBackoff *monitoringv1.Duration `json:"maxBackoff,omitempty"` + RetryOnRateLimit *bool `json:"retryOnRateLimit,omitempty"` + SampleAgeLimit *monitoringv1.Duration `json:"sampleAgeLimit,omitempty"` +} + +// QueueConfigApplyConfiguration constructs a declarative configuration of the QueueConfig type for use with +// apply. +func QueueConfig() *QueueConfigApplyConfiguration { + return &QueueConfigApplyConfiguration{} +} + +// WithCapacity sets the Capacity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Capacity field is set to the value of the last call. +func (b *QueueConfigApplyConfiguration) WithCapacity(value int) *QueueConfigApplyConfiguration { + b.Capacity = &value + return b +} + +// WithMinShards sets the MinShards field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinShards field is set to the value of the last call. +func (b *QueueConfigApplyConfiguration) WithMinShards(value int) *QueueConfigApplyConfiguration { + b.MinShards = &value + return b +} + +// WithMaxShards sets the MaxShards field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxShards field is set to the value of the last call. 
+func (b *QueueConfigApplyConfiguration) WithMaxShards(value int) *QueueConfigApplyConfiguration { + b.MaxShards = &value + return b +} + +// WithMaxSamplesPerSend sets the MaxSamplesPerSend field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxSamplesPerSend field is set to the value of the last call. +func (b *QueueConfigApplyConfiguration) WithMaxSamplesPerSend(value int) *QueueConfigApplyConfiguration { + b.MaxSamplesPerSend = &value + return b +} + +// WithBatchSendDeadline sets the BatchSendDeadline field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BatchSendDeadline field is set to the value of the last call. +func (b *QueueConfigApplyConfiguration) WithBatchSendDeadline(value monitoringv1.Duration) *QueueConfigApplyConfiguration { + b.BatchSendDeadline = &value + return b +} + +// WithMaxRetries sets the MaxRetries field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxRetries field is set to the value of the last call. +func (b *QueueConfigApplyConfiguration) WithMaxRetries(value int) *QueueConfigApplyConfiguration { + b.MaxRetries = &value + return b +} + +// WithMinBackoff sets the MinBackoff field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinBackoff field is set to the value of the last call. +func (b *QueueConfigApplyConfiguration) WithMinBackoff(value monitoringv1.Duration) *QueueConfigApplyConfiguration { + b.MinBackoff = &value + return b +} + +// WithMaxBackoff sets the MaxBackoff field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxBackoff field is set to the value of the last call. +func (b *QueueConfigApplyConfiguration) WithMaxBackoff(value monitoringv1.Duration) *QueueConfigApplyConfiguration { + b.MaxBackoff = &value + return b +} + +// WithRetryOnRateLimit sets the RetryOnRateLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RetryOnRateLimit field is set to the value of the last call. +func (b *QueueConfigApplyConfiguration) WithRetryOnRateLimit(value bool) *QueueConfigApplyConfiguration { + b.RetryOnRateLimit = &value + return b +} + +// WithSampleAgeLimit sets the SampleAgeLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SampleAgeLimit field is set to the value of the last call. 
+func (b *QueueConfigApplyConfiguration) WithSampleAgeLimit(value monitoringv1.Duration) *QueueConfigApplyConfiguration { + b.SampleAgeLimit = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/relabelconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/relabelconfig.go new file mode 100644 index 0000000000..1ab3212ba4 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/relabelconfig.go @@ -0,0 +1,97 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// RelabelConfigApplyConfiguration represents a declarative configuration of the RelabelConfig type for use +// with apply. +type RelabelConfigApplyConfiguration struct { + SourceLabels []monitoringv1.LabelName `json:"sourceLabels,omitempty"` + Separator *string `json:"separator,omitempty"` + TargetLabel *string `json:"targetLabel,omitempty"` + Regex *string `json:"regex,omitempty"` + Modulus *uint64 `json:"modulus,omitempty"` + Replacement *string `json:"replacement,omitempty"` + Action *string `json:"action,omitempty"` +} + +// RelabelConfigApplyConfiguration constructs a declarative configuration of the RelabelConfig type for use with +// apply. +func RelabelConfig() *RelabelConfigApplyConfiguration { + return &RelabelConfigApplyConfiguration{} +} + +// WithSourceLabels adds the given value to the SourceLabels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the SourceLabels field. +func (b *RelabelConfigApplyConfiguration) WithSourceLabels(values ...monitoringv1.LabelName) *RelabelConfigApplyConfiguration { + for i := range values { + b.SourceLabels = append(b.SourceLabels, values[i]) + } + return b +} + +// WithSeparator sets the Separator field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Separator field is set to the value of the last call. +func (b *RelabelConfigApplyConfiguration) WithSeparator(value string) *RelabelConfigApplyConfiguration { + b.Separator = &value + return b +} + +// WithTargetLabel sets the TargetLabel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetLabel field is set to the value of the last call. 
+func (b *RelabelConfigApplyConfiguration) WithTargetLabel(value string) *RelabelConfigApplyConfiguration { + b.TargetLabel = &value + return b +} + +// WithRegex sets the Regex field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Regex field is set to the value of the last call. +func (b *RelabelConfigApplyConfiguration) WithRegex(value string) *RelabelConfigApplyConfiguration { + b.Regex = &value + return b +} + +// WithModulus sets the Modulus field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Modulus field is set to the value of the last call. +func (b *RelabelConfigApplyConfiguration) WithModulus(value uint64) *RelabelConfigApplyConfiguration { + b.Modulus = &value + return b +} + +// WithReplacement sets the Replacement field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Replacement field is set to the value of the last call. +func (b *RelabelConfigApplyConfiguration) WithReplacement(value string) *RelabelConfigApplyConfiguration { + b.Replacement = &value + return b +} + +// WithAction sets the Action field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Action field is set to the value of the last call. +func (b *RelabelConfigApplyConfiguration) WithAction(value string) *RelabelConfigApplyConfiguration { + b.Action = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/remotereadspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/remotereadspec.go new file mode 100644 index 0000000000..110452113f --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/remotereadspec.go @@ -0,0 +1,210 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" +) + +// RemoteReadSpecApplyConfiguration represents a declarative configuration of the RemoteReadSpec type for use +// with apply. 
+type RemoteReadSpecApplyConfiguration struct { + URL *string `json:"url,omitempty"` + Name *string `json:"name,omitempty"` + RequiredMatchers map[string]string `json:"requiredMatchers,omitempty"` + RemoteTimeout *monitoringv1.Duration `json:"remoteTimeout,omitempty"` + Headers map[string]string `json:"headers,omitempty"` + ReadRecent *bool `json:"readRecent,omitempty"` + OAuth2 *OAuth2ApplyConfiguration `json:"oauth2,omitempty"` + BasicAuth *BasicAuthApplyConfiguration `json:"basicAuth,omitempty"` + BearerTokenFile *string `json:"bearerTokenFile,omitempty"` + Authorization *AuthorizationApplyConfiguration `json:"authorization,omitempty"` + BearerToken *string `json:"bearerToken,omitempty"` + TLSConfig *TLSConfigApplyConfiguration `json:"tlsConfig,omitempty"` + ProxyConfigApplyConfiguration `json:",inline"` + FollowRedirects *bool `json:"followRedirects,omitempty"` + FilterExternalLabels *bool `json:"filterExternalLabels,omitempty"` +} + +// RemoteReadSpecApplyConfiguration constructs a declarative configuration of the RemoteReadSpec type for use with +// apply. +func RemoteReadSpec() *RemoteReadSpecApplyConfiguration { + return &RemoteReadSpecApplyConfiguration{} +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. +func (b *RemoteReadSpecApplyConfiguration) WithURL(value string) *RemoteReadSpecApplyConfiguration { + b.URL = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *RemoteReadSpecApplyConfiguration) WithName(value string) *RemoteReadSpecApplyConfiguration { + b.Name = &value + return b +} + +// WithRequiredMatchers puts the entries into the RequiredMatchers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the RequiredMatchers field, +// overwriting an existing map entries in RequiredMatchers field with the same key. +func (b *RemoteReadSpecApplyConfiguration) WithRequiredMatchers(entries map[string]string) *RemoteReadSpecApplyConfiguration { + if b.RequiredMatchers == nil && len(entries) > 0 { + b.RequiredMatchers = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.RequiredMatchers[k] = v + } + return b +} + +// WithRemoteTimeout sets the RemoteTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RemoteTimeout field is set to the value of the last call. +func (b *RemoteReadSpecApplyConfiguration) WithRemoteTimeout(value monitoringv1.Duration) *RemoteReadSpecApplyConfiguration { + b.RemoteTimeout = &value + return b +} + +// WithHeaders puts the entries into the Headers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Headers field, +// overwriting an existing map entries in Headers field with the same key. 
+func (b *RemoteReadSpecApplyConfiguration) WithHeaders(entries map[string]string) *RemoteReadSpecApplyConfiguration { + if b.Headers == nil && len(entries) > 0 { + b.Headers = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Headers[k] = v + } + return b +} + +// WithReadRecent sets the ReadRecent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadRecent field is set to the value of the last call. +func (b *RemoteReadSpecApplyConfiguration) WithReadRecent(value bool) *RemoteReadSpecApplyConfiguration { + b.ReadRecent = &value + return b +} + +// WithOAuth2 sets the OAuth2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OAuth2 field is set to the value of the last call. +func (b *RemoteReadSpecApplyConfiguration) WithOAuth2(value *OAuth2ApplyConfiguration) *RemoteReadSpecApplyConfiguration { + b.OAuth2 = value + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *RemoteReadSpecApplyConfiguration) WithBasicAuth(value *BasicAuthApplyConfiguration) *RemoteReadSpecApplyConfiguration { + b.BasicAuth = value + return b +} + +// WithBearerTokenFile sets the BearerTokenFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerTokenFile field is set to the value of the last call. +func (b *RemoteReadSpecApplyConfiguration) WithBearerTokenFile(value string) *RemoteReadSpecApplyConfiguration { + b.BearerTokenFile = &value + return b +} + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *RemoteReadSpecApplyConfiguration) WithAuthorization(value *AuthorizationApplyConfiguration) *RemoteReadSpecApplyConfiguration { + b.Authorization = value + return b +} + +// WithBearerToken sets the BearerToken field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerToken field is set to the value of the last call. +func (b *RemoteReadSpecApplyConfiguration) WithBearerToken(value string) *RemoteReadSpecApplyConfiguration { + b.BearerToken = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. 
+func (b *RemoteReadSpecApplyConfiguration) WithTLSConfig(value *TLSConfigApplyConfiguration) *RemoteReadSpecApplyConfiguration { + b.TLSConfig = value + return b +} + +// WithProxyURL sets the ProxyURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyURL field is set to the value of the last call. +func (b *RemoteReadSpecApplyConfiguration) WithProxyURL(value string) *RemoteReadSpecApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyURL = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *RemoteReadSpecApplyConfiguration) WithNoProxy(value string) *RemoteReadSpecApplyConfiguration { + b.ProxyConfigApplyConfiguration.NoProxy = &value + return b +} + +// WithProxyFromEnvironment sets the ProxyFromEnvironment field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyFromEnvironment field is set to the value of the last call. +func (b *RemoteReadSpecApplyConfiguration) WithProxyFromEnvironment(value bool) *RemoteReadSpecApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyFromEnvironment = &value + return b +} + +// WithProxyConnectHeader puts the entries into the ProxyConnectHeader field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ProxyConnectHeader field, +// overwriting an existing map entries in ProxyConnectHeader field with the same key. +func (b *RemoteReadSpecApplyConfiguration) WithProxyConnectHeader(entries map[string][]corev1.SecretKeySelector) *RemoteReadSpecApplyConfiguration { + if b.ProxyConfigApplyConfiguration.ProxyConnectHeader == nil && len(entries) > 0 { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader = make(map[string][]corev1.SecretKeySelector, len(entries)) + } + for k, v := range entries { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader[k] = v + } + return b +} + +// WithFollowRedirects sets the FollowRedirects field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FollowRedirects field is set to the value of the last call. +func (b *RemoteReadSpecApplyConfiguration) WithFollowRedirects(value bool) *RemoteReadSpecApplyConfiguration { + b.FollowRedirects = &value + return b +} + +// WithFilterExternalLabels sets the FilterExternalLabels field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FilterExternalLabels field is set to the value of the last call. 
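Editorial note: a hedged sketch of how the RemoteReadSpec builder above might be used; the URL, header, and other values are illustrative, and the monitoringv1/monitoringv1ac import aliases are the same as in the first sketch:

	// Build a remote-read endpoint declaratively; fields left unset stay nil
	// and are omitted from the serialized apply configuration.
	rr := monitoringv1ac.RemoteReadSpec().
		WithURL("https://thanos.example.com/api/v1/read").
		WithName("thanos").
		WithRemoteTimeout(monitoringv1.Duration("30s")).
		WithReadRecent(true).
		WithHeaders(map[string]string{"X-Scope-OrgID": "openshift"})
	_ = rr
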
+func (b *RemoteReadSpecApplyConfiguration) WithFilterExternalLabels(value bool) *RemoteReadSpecApplyConfiguration { + b.FilterExternalLabels = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/remotewritespec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/remotewritespec.go new file mode 100644 index 0000000000..0ac7e0b66f --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/remotewritespec.go @@ -0,0 +1,272 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" +) + +// RemoteWriteSpecApplyConfiguration represents a declarative configuration of the RemoteWriteSpec type for use +// with apply. +type RemoteWriteSpecApplyConfiguration struct { + URL *string `json:"url,omitempty"` + Name *string `json:"name,omitempty"` + MessageVersion *monitoringv1.RemoteWriteMessageVersion `json:"messageVersion,omitempty"` + SendExemplars *bool `json:"sendExemplars,omitempty"` + SendNativeHistograms *bool `json:"sendNativeHistograms,omitempty"` + RemoteTimeout *monitoringv1.Duration `json:"remoteTimeout,omitempty"` + Headers map[string]string `json:"headers,omitempty"` + WriteRelabelConfigs []RelabelConfigApplyConfiguration `json:"writeRelabelConfigs,omitempty"` + OAuth2 *OAuth2ApplyConfiguration `json:"oauth2,omitempty"` + BasicAuth *BasicAuthApplyConfiguration `json:"basicAuth,omitempty"` + BearerTokenFile *string `json:"bearerTokenFile,omitempty"` + Authorization *AuthorizationApplyConfiguration `json:"authorization,omitempty"` + Sigv4 *Sigv4ApplyConfiguration `json:"sigv4,omitempty"` + AzureAD *AzureADApplyConfiguration `json:"azureAd,omitempty"` + BearerToken *string `json:"bearerToken,omitempty"` + TLSConfig *TLSConfigApplyConfiguration `json:"tlsConfig,omitempty"` + ProxyConfigApplyConfiguration `json:",inline"` + FollowRedirects *bool `json:"followRedirects,omitempty"` + QueueConfig *QueueConfigApplyConfiguration `json:"queueConfig,omitempty"` + MetadataConfig *MetadataConfigApplyConfiguration `json:"metadataConfig,omitempty"` + EnableHttp2 *bool `json:"enableHTTP2,omitempty"` + RoundRobinDNS *bool `json:"roundRobinDNS,omitempty"` +} + +// RemoteWriteSpecApplyConfiguration constructs a declarative configuration of the RemoteWriteSpec type for use with +// apply. +func RemoteWriteSpec() *RemoteWriteSpecApplyConfiguration { + return &RemoteWriteSpecApplyConfiguration{} +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. 
+func (b *RemoteWriteSpecApplyConfiguration) WithURL(value string) *RemoteWriteSpecApplyConfiguration { + b.URL = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithName(value string) *RemoteWriteSpecApplyConfiguration { + b.Name = &value + return b +} + +// WithMessageVersion sets the MessageVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MessageVersion field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithMessageVersion(value monitoringv1.RemoteWriteMessageVersion) *RemoteWriteSpecApplyConfiguration { + b.MessageVersion = &value + return b +} + +// WithSendExemplars sets the SendExemplars field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SendExemplars field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithSendExemplars(value bool) *RemoteWriteSpecApplyConfiguration { + b.SendExemplars = &value + return b +} + +// WithSendNativeHistograms sets the SendNativeHistograms field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SendNativeHistograms field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithSendNativeHistograms(value bool) *RemoteWriteSpecApplyConfiguration { + b.SendNativeHistograms = &value + return b +} + +// WithRemoteTimeout sets the RemoteTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RemoteTimeout field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithRemoteTimeout(value monitoringv1.Duration) *RemoteWriteSpecApplyConfiguration { + b.RemoteTimeout = &value + return b +} + +// WithHeaders puts the entries into the Headers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Headers field, +// overwriting an existing map entries in Headers field with the same key. +func (b *RemoteWriteSpecApplyConfiguration) WithHeaders(entries map[string]string) *RemoteWriteSpecApplyConfiguration { + if b.Headers == nil && len(entries) > 0 { + b.Headers = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Headers[k] = v + } + return b +} + +// WithWriteRelabelConfigs adds the given value to the WriteRelabelConfigs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the WriteRelabelConfigs field. 
+func (b *RemoteWriteSpecApplyConfiguration) WithWriteRelabelConfigs(values ...*RelabelConfigApplyConfiguration) *RemoteWriteSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithWriteRelabelConfigs") + } + b.WriteRelabelConfigs = append(b.WriteRelabelConfigs, *values[i]) + } + return b +} + +// WithOAuth2 sets the OAuth2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OAuth2 field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithOAuth2(value *OAuth2ApplyConfiguration) *RemoteWriteSpecApplyConfiguration { + b.OAuth2 = value + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithBasicAuth(value *BasicAuthApplyConfiguration) *RemoteWriteSpecApplyConfiguration { + b.BasicAuth = value + return b +} + +// WithBearerTokenFile sets the BearerTokenFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerTokenFile field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithBearerTokenFile(value string) *RemoteWriteSpecApplyConfiguration { + b.BearerTokenFile = &value + return b +} + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithAuthorization(value *AuthorizationApplyConfiguration) *RemoteWriteSpecApplyConfiguration { + b.Authorization = value + return b +} + +// WithSigv4 sets the Sigv4 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Sigv4 field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithSigv4(value *Sigv4ApplyConfiguration) *RemoteWriteSpecApplyConfiguration { + b.Sigv4 = value + return b +} + +// WithAzureAD sets the AzureAD field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AzureAD field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithAzureAD(value *AzureADApplyConfiguration) *RemoteWriteSpecApplyConfiguration { + b.AzureAD = value + return b +} + +// WithBearerToken sets the BearerToken field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BearerToken field is set to the value of the last call. 
+func (b *RemoteWriteSpecApplyConfiguration) WithBearerToken(value string) *RemoteWriteSpecApplyConfiguration { + b.BearerToken = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithTLSConfig(value *TLSConfigApplyConfiguration) *RemoteWriteSpecApplyConfiguration { + b.TLSConfig = value + return b +} + +// WithProxyURL sets the ProxyURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyURL field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithProxyURL(value string) *RemoteWriteSpecApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyURL = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithNoProxy(value string) *RemoteWriteSpecApplyConfiguration { + b.ProxyConfigApplyConfiguration.NoProxy = &value + return b +} + +// WithProxyFromEnvironment sets the ProxyFromEnvironment field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProxyFromEnvironment field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithProxyFromEnvironment(value bool) *RemoteWriteSpecApplyConfiguration { + b.ProxyConfigApplyConfiguration.ProxyFromEnvironment = &value + return b +} + +// WithProxyConnectHeader puts the entries into the ProxyConnectHeader field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ProxyConnectHeader field, +// overwriting an existing map entries in ProxyConnectHeader field with the same key. +func (b *RemoteWriteSpecApplyConfiguration) WithProxyConnectHeader(entries map[string][]corev1.SecretKeySelector) *RemoteWriteSpecApplyConfiguration { + if b.ProxyConfigApplyConfiguration.ProxyConnectHeader == nil && len(entries) > 0 { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader = make(map[string][]corev1.SecretKeySelector, len(entries)) + } + for k, v := range entries { + b.ProxyConfigApplyConfiguration.ProxyConnectHeader[k] = v + } + return b +} + +// WithFollowRedirects sets the FollowRedirects field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FollowRedirects field is set to the value of the last call. 
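Editorial note: a hedged sketch combining the RemoteWriteSpec, RelabelConfig, and QueueConfig builders; WithQueueConfig is defined just below in this file, the endpoint and label values are illustrative, and the imports are as in the first sketch:

	rw := monitoringv1ac.RemoteWriteSpec().
		WithURL("https://remote.example.com/api/v1/write").
		WithRemoteTimeout(monitoringv1.Duration("30s")).
		WithWriteRelabelConfigs(
			// Drop ALERTS-style series before they are written remotely.
			monitoringv1ac.RelabelConfig().
				WithSourceLabels(monitoringv1.LabelName("__name__")).
				WithRegex("ALERTS.*").
				WithAction("drop"),
		).
		WithQueueConfig(monitoringv1ac.QueueConfig().WithMaxShards(50))
	_ = rw
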
+func (b *RemoteWriteSpecApplyConfiguration) WithFollowRedirects(value bool) *RemoteWriteSpecApplyConfiguration { + b.FollowRedirects = &value + return b +} + +// WithQueueConfig sets the QueueConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the QueueConfig field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithQueueConfig(value *QueueConfigApplyConfiguration) *RemoteWriteSpecApplyConfiguration { + b.QueueConfig = value + return b +} + +// WithMetadataConfig sets the MetadataConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MetadataConfig field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithMetadataConfig(value *MetadataConfigApplyConfiguration) *RemoteWriteSpecApplyConfiguration { + b.MetadataConfig = value + return b +} + +// WithEnableHttp2 sets the EnableHttp2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableHttp2 field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithEnableHttp2(value bool) *RemoteWriteSpecApplyConfiguration { + b.EnableHttp2 = &value + return b +} + +// WithRoundRobinDNS sets the RoundRobinDNS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RoundRobinDNS field is set to the value of the last call. +func (b *RemoteWriteSpecApplyConfiguration) WithRoundRobinDNS(value bool) *RemoteWriteSpecApplyConfiguration { + b.RoundRobinDNS = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/retainconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/retainconfig.go new file mode 100644 index 0000000000..f0bfc6207d --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/retainconfig.go @@ -0,0 +1,41 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// RetainConfigApplyConfiguration represents a declarative configuration of the RetainConfig type for use +// with apply. 
+type RetainConfigApplyConfiguration struct { + RetentionPeriod *monitoringv1.Duration `json:"retentionPeriod,omitempty"` +} + +// RetainConfigApplyConfiguration constructs a declarative configuration of the RetainConfig type for use with +// apply. +func RetainConfig() *RetainConfigApplyConfiguration { + return &RetainConfigApplyConfiguration{} +} + +// WithRetentionPeriod sets the RetentionPeriod field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RetentionPeriod field is set to the value of the last call. +func (b *RetainConfigApplyConfiguration) WithRetentionPeriod(value monitoringv1.Duration) *RetainConfigApplyConfiguration { + b.RetentionPeriod = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rollingupdatestatefulsetstrategy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rollingupdatestatefulsetstrategy.go new file mode 100644 index 0000000000..5c09187f4a --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rollingupdatestatefulsetstrategy.go @@ -0,0 +1,41 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use +// with apply. +type RollingUpdateStatefulSetStrategyApplyConfiguration struct { + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` +} + +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with +// apply. +func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration { + return &RollingUpdateStatefulSetStrategyApplyConfiguration{} +} + +// WithMaxUnavailable sets the MaxUnavailable field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxUnavailable field is set to the value of the last call. 
+func (b *RollingUpdateStatefulSetStrategyApplyConfiguration) WithMaxUnavailable(value intstr.IntOrString) *RollingUpdateStatefulSetStrategyApplyConfiguration { + b.MaxUnavailable = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rule.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rule.go new file mode 100644 index 0000000000..2e6b9f8be9 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rule.go @@ -0,0 +1,108 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// RuleApplyConfiguration represents a declarative configuration of the Rule type for use +// with apply. +type RuleApplyConfiguration struct { + Record *string `json:"record,omitempty"` + Alert *string `json:"alert,omitempty"` + Expr *intstr.IntOrString `json:"expr,omitempty"` + For *monitoringv1.Duration `json:"for,omitempty"` + KeepFiringFor *monitoringv1.NonEmptyDuration `json:"keep_firing_for,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// RuleApplyConfiguration constructs a declarative configuration of the Rule type for use with +// apply. +func Rule() *RuleApplyConfiguration { + return &RuleApplyConfiguration{} +} + +// WithRecord sets the Record field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Record field is set to the value of the last call. +func (b *RuleApplyConfiguration) WithRecord(value string) *RuleApplyConfiguration { + b.Record = &value + return b +} + +// WithAlert sets the Alert field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Alert field is set to the value of the last call. +func (b *RuleApplyConfiguration) WithAlert(value string) *RuleApplyConfiguration { + b.Alert = &value + return b +} + +// WithExpr sets the Expr field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expr field is set to the value of the last call. 
+func (b *RuleApplyConfiguration) WithExpr(value intstr.IntOrString) *RuleApplyConfiguration { + b.Expr = &value + return b +} + +// WithFor sets the For field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the For field is set to the value of the last call. +func (b *RuleApplyConfiguration) WithFor(value monitoringv1.Duration) *RuleApplyConfiguration { + b.For = &value + return b +} + +// WithKeepFiringFor sets the KeepFiringFor field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeepFiringFor field is set to the value of the last call. +func (b *RuleApplyConfiguration) WithKeepFiringFor(value monitoringv1.NonEmptyDuration) *RuleApplyConfiguration { + b.KeepFiringFor = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *RuleApplyConfiguration) WithLabels(entries map[string]string) *RuleApplyConfiguration { + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *RuleApplyConfiguration) WithAnnotations(entries map[string]string) *RuleApplyConfiguration { + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rulegroup.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rulegroup.go new file mode 100644 index 0000000000..12a1b94cc1 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rulegroup.go @@ -0,0 +1,106 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// RuleGroupApplyConfiguration represents a declarative configuration of the RuleGroup type for use +// with apply. +type RuleGroupApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Interval *monitoringv1.Duration `json:"interval,omitempty"` + QueryOffset *monitoringv1.Duration `json:"query_offset,omitempty"` + Rules []RuleApplyConfiguration `json:"rules,omitempty"` + PartialResponseStrategy *string `json:"partial_response_strategy,omitempty"` + Limit *int `json:"limit,omitempty"` +} + +// RuleGroupApplyConfiguration constructs a declarative configuration of the RuleGroup type for use with +// apply. +func RuleGroup() *RuleGroupApplyConfiguration { + return &RuleGroupApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *RuleGroupApplyConfiguration) WithName(value string) *RuleGroupApplyConfiguration { + b.Name = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *RuleGroupApplyConfiguration) WithLabels(entries map[string]string) *RuleGroupApplyConfiguration { + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithInterval sets the Interval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Interval field is set to the value of the last call. +func (b *RuleGroupApplyConfiguration) WithInterval(value monitoringv1.Duration) *RuleGroupApplyConfiguration { + b.Interval = &value + return b +} + +// WithQueryOffset sets the QueryOffset field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the QueryOffset field is set to the value of the last call. +func (b *RuleGroupApplyConfiguration) WithQueryOffset(value monitoringv1.Duration) *RuleGroupApplyConfiguration { + b.QueryOffset = &value + return b +} + +// WithRules adds the given value to the Rules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Rules field. 
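Editorial note: a hedged sketch of composing the Rule and RuleGroup builders; WithRules is defined just below, the alert name and PromQL expression are illustrative, the k8s.io/apimachinery/pkg/util/intstr import is assumed, and the other imports are as in the first sketch:

	grp := monitoringv1ac.RuleGroup().
		WithName("example.rules").
		WithInterval(monitoringv1.Duration("30s")).
		WithRules(
			monitoringv1ac.Rule().
				WithAlert("ExampleTargetDown").
				WithExpr(intstr.FromString(`absent(up{job="example"} == 1)`)).
				WithFor(monitoringv1.Duration("10m")).
				WithLabels(map[string]string{"severity": "warning"}),
		)
	_ = grp
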
+func (b *RuleGroupApplyConfiguration) WithRules(values ...*RuleApplyConfiguration) *RuleGroupApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRules") + } + b.Rules = append(b.Rules, *values[i]) + } + return b +} + +// WithPartialResponseStrategy sets the PartialResponseStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PartialResponseStrategy field is set to the value of the last call. +func (b *RuleGroupApplyConfiguration) WithPartialResponseStrategy(value string) *RuleGroupApplyConfiguration { + b.PartialResponseStrategy = &value + return b +} + +// WithLimit sets the Limit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Limit field is set to the value of the last call. +func (b *RuleGroupApplyConfiguration) WithLimit(value int) *RuleGroupApplyConfiguration { + b.Limit = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rules.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rules.go new file mode 100644 index 0000000000..1ae1e1f9f7 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rules.go @@ -0,0 +1,37 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// RulesApplyConfiguration represents a declarative configuration of the Rules type for use +// with apply. +type RulesApplyConfiguration struct { + Alert *RulesAlertApplyConfiguration `json:"alert,omitempty"` +} + +// RulesApplyConfiguration constructs a declarative configuration of the Rules type for use with +// apply. +func Rules() *RulesApplyConfiguration { + return &RulesApplyConfiguration{} +} + +// WithAlert sets the Alert field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Alert field is set to the value of the last call. 
+func (b *RulesApplyConfiguration) WithAlert(value *RulesAlertApplyConfiguration) *RulesApplyConfiguration { + b.Alert = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rulesalert.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rulesalert.go new file mode 100644 index 0000000000..9428363f91 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/rulesalert.go @@ -0,0 +1,55 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// RulesAlertApplyConfiguration represents a declarative configuration of the RulesAlert type for use +// with apply. +type RulesAlertApplyConfiguration struct { + ForOutageTolerance *string `json:"forOutageTolerance,omitempty"` + ForGracePeriod *string `json:"forGracePeriod,omitempty"` + ResendDelay *string `json:"resendDelay,omitempty"` +} + +// RulesAlertApplyConfiguration constructs a declarative configuration of the RulesAlert type for use with +// apply. +func RulesAlert() *RulesAlertApplyConfiguration { + return &RulesAlertApplyConfiguration{} +} + +// WithForOutageTolerance sets the ForOutageTolerance field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForOutageTolerance field is set to the value of the last call. +func (b *RulesAlertApplyConfiguration) WithForOutageTolerance(value string) *RulesAlertApplyConfiguration { + b.ForOutageTolerance = &value + return b +} + +// WithForGracePeriod sets the ForGracePeriod field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForGracePeriod field is set to the value of the last call. +func (b *RulesAlertApplyConfiguration) WithForGracePeriod(value string) *RulesAlertApplyConfiguration { + b.ForGracePeriod = &value + return b +} + +// WithResendDelay sets the ResendDelay field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResendDelay field is set to the value of the last call. 
+func (b *RulesAlertApplyConfiguration) WithResendDelay(value string) *RulesAlertApplyConfiguration { + b.ResendDelay = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/runtimeconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/runtimeconfig.go new file mode 100644 index 0000000000..2e65f6c75d --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/runtimeconfig.go @@ -0,0 +1,37 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// RuntimeConfigApplyConfiguration represents a declarative configuration of the RuntimeConfig type for use +// with apply. +type RuntimeConfigApplyConfiguration struct { + GoGC *int32 `json:"goGC,omitempty"` +} + +// RuntimeConfigApplyConfiguration constructs a declarative configuration of the RuntimeConfig type for use with +// apply. +func RuntimeConfig() *RuntimeConfigApplyConfiguration { + return &RuntimeConfigApplyConfiguration{} +} + +// WithGoGC sets the GoGC field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GoGC field is set to the value of the last call. +func (b *RuntimeConfigApplyConfiguration) WithGoGC(value int32) *RuntimeConfigApplyConfiguration { + b.GoGC = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/safeauthorization.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/safeauthorization.go new file mode 100644 index 0000000000..702989ec6f --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/safeauthorization.go @@ -0,0 +1,50 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// SafeAuthorizationApplyConfiguration represents a declarative configuration of the SafeAuthorization type for use +// with apply. 
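Editorial note: a hedged one-liner for the RuntimeConfig builder above (the GoGC value is illustrative; imports as in the first sketch):

	// Tune the Go garbage collector target percentage for Prometheus.
	rc := monitoringv1ac.RuntimeConfig().WithGoGC(75)
	_ = rc
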
+type SafeAuthorizationApplyConfiguration struct { + Type *string `json:"type,omitempty"` + Credentials *corev1.SecretKeySelector `json:"credentials,omitempty"` +} + +// SafeAuthorizationApplyConfiguration constructs a declarative configuration of the SafeAuthorization type for use with +// apply. +func SafeAuthorization() *SafeAuthorizationApplyConfiguration { + return &SafeAuthorizationApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *SafeAuthorizationApplyConfiguration) WithType(value string) *SafeAuthorizationApplyConfiguration { + b.Type = &value + return b +} + +// WithCredentials sets the Credentials field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Credentials field is set to the value of the last call. +func (b *SafeAuthorizationApplyConfiguration) WithCredentials(value corev1.SecretKeySelector) *SafeAuthorizationApplyConfiguration { + b.Credentials = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/safetlsconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/safetlsconfig.go new file mode 100644 index 0000000000..dfc1cb31d9 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/safetlsconfig.go @@ -0,0 +1,96 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" +) + +// SafeTLSConfigApplyConfiguration represents a declarative configuration of the SafeTLSConfig type for use +// with apply. +type SafeTLSConfigApplyConfiguration struct { + CA *SecretOrConfigMapApplyConfiguration `json:"ca,omitempty"` + Cert *SecretOrConfigMapApplyConfiguration `json:"cert,omitempty"` + KeySecret *corev1.SecretKeySelector `json:"keySecret,omitempty"` + ServerName *string `json:"serverName,omitempty"` + InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"` + MinVersion *monitoringv1.TLSVersion `json:"minVersion,omitempty"` + MaxVersion *monitoringv1.TLSVersion `json:"maxVersion,omitempty"` +} + +// SafeTLSConfigApplyConfiguration constructs a declarative configuration of the SafeTLSConfig type for use with +// apply. 
+func SafeTLSConfig() *SafeTLSConfigApplyConfiguration { + return &SafeTLSConfigApplyConfiguration{} +} + +// WithCA sets the CA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CA field is set to the value of the last call. +func (b *SafeTLSConfigApplyConfiguration) WithCA(value *SecretOrConfigMapApplyConfiguration) *SafeTLSConfigApplyConfiguration { + b.CA = value + return b +} + +// WithCert sets the Cert field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Cert field is set to the value of the last call. +func (b *SafeTLSConfigApplyConfiguration) WithCert(value *SecretOrConfigMapApplyConfiguration) *SafeTLSConfigApplyConfiguration { + b.Cert = value + return b +} + +// WithKeySecret sets the KeySecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeySecret field is set to the value of the last call. +func (b *SafeTLSConfigApplyConfiguration) WithKeySecret(value corev1.SecretKeySelector) *SafeTLSConfigApplyConfiguration { + b.KeySecret = &value + return b +} + +// WithServerName sets the ServerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServerName field is set to the value of the last call. +func (b *SafeTLSConfigApplyConfiguration) WithServerName(value string) *SafeTLSConfigApplyConfiguration { + b.ServerName = &value + return b +} + +// WithInsecureSkipVerify sets the InsecureSkipVerify field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InsecureSkipVerify field is set to the value of the last call. +func (b *SafeTLSConfigApplyConfiguration) WithInsecureSkipVerify(value bool) *SafeTLSConfigApplyConfiguration { + b.InsecureSkipVerify = &value + return b +} + +// WithMinVersion sets the MinVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinVersion field is set to the value of the last call. +func (b *SafeTLSConfigApplyConfiguration) WithMinVersion(value monitoringv1.TLSVersion) *SafeTLSConfigApplyConfiguration { + b.MinVersion = &value + return b +} + +// WithMaxVersion sets the MaxVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxVersion field is set to the value of the last call. 
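Editorial note: a hedged sketch for the SafeTLSConfig builder; the server name is illustrative and "TLS12" is assumed to be a valid monitoringv1.TLSVersion string, with imports as in the first sketch:

	tls := monitoringv1ac.SafeTLSConfig().
		WithServerName("prometheus.example.svc").
		WithInsecureSkipVerify(false).
		WithMinVersion(monitoringv1.TLSVersion("TLS12"))
	_ = tls
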
+func (b *SafeTLSConfigApplyConfiguration) WithMaxVersion(value monitoringv1.TLSVersion) *SafeTLSConfigApplyConfiguration { + b.MaxVersion = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/scrapeclass.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/scrapeclass.go new file mode 100644 index 0000000000..c8883c9ede --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/scrapeclass.go @@ -0,0 +1,114 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// ScrapeClassApplyConfiguration represents a declarative configuration of the ScrapeClass type for use +// with apply. +type ScrapeClassApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Default *bool `json:"default,omitempty"` + FallbackScrapeProtocol *monitoringv1.ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + TLSConfig *TLSConfigApplyConfiguration `json:"tlsConfig,omitempty"` + Authorization *AuthorizationApplyConfiguration `json:"authorization,omitempty"` + Relabelings []RelabelConfigApplyConfiguration `json:"relabelings,omitempty"` + MetricRelabelings []RelabelConfigApplyConfiguration `json:"metricRelabelings,omitempty"` + AttachMetadata *AttachMetadataApplyConfiguration `json:"attachMetadata,omitempty"` +} + +// ScrapeClassApplyConfiguration constructs a declarative configuration of the ScrapeClass type for use with +// apply. +func ScrapeClass() *ScrapeClassApplyConfiguration { + return &ScrapeClassApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ScrapeClassApplyConfiguration) WithName(value string) *ScrapeClassApplyConfiguration { + b.Name = &value + return b +} + +// WithDefault sets the Default field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Default field is set to the value of the last call. +func (b *ScrapeClassApplyConfiguration) WithDefault(value bool) *ScrapeClassApplyConfiguration { + b.Default = &value + return b +} + +// WithFallbackScrapeProtocol sets the FallbackScrapeProtocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FallbackScrapeProtocol field is set to the value of the last call. 
+func (b *ScrapeClassApplyConfiguration) WithFallbackScrapeProtocol(value monitoringv1.ScrapeProtocol) *ScrapeClassApplyConfiguration { + b.FallbackScrapeProtocol = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *ScrapeClassApplyConfiguration) WithTLSConfig(value *TLSConfigApplyConfiguration) *ScrapeClassApplyConfiguration { + b.TLSConfig = value + return b +} + +// WithAuthorization sets the Authorization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authorization field is set to the value of the last call. +func (b *ScrapeClassApplyConfiguration) WithAuthorization(value *AuthorizationApplyConfiguration) *ScrapeClassApplyConfiguration { + b.Authorization = value + return b +} + +// WithRelabelings adds the given value to the Relabelings field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Relabelings field. +func (b *ScrapeClassApplyConfiguration) WithRelabelings(values ...*RelabelConfigApplyConfiguration) *ScrapeClassApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRelabelings") + } + b.Relabelings = append(b.Relabelings, *values[i]) + } + return b +} + +// WithMetricRelabelings adds the given value to the MetricRelabelings field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MetricRelabelings field. +func (b *ScrapeClassApplyConfiguration) WithMetricRelabelings(values ...*RelabelConfigApplyConfiguration) *ScrapeClassApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMetricRelabelings") + } + b.MetricRelabelings = append(b.MetricRelabelings, *values[i]) + } + return b +} + +// WithAttachMetadata sets the AttachMetadata field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AttachMetadata field is set to the value of the last call. +func (b *ScrapeClassApplyConfiguration) WithAttachMetadata(value *AttachMetadataApplyConfiguration) *ScrapeClassApplyConfiguration { + b.AttachMetadata = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/secretorconfigmap.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/secretorconfigmap.go new file mode 100644 index 0000000000..813807c53e --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/secretorconfigmap.go @@ -0,0 +1,50 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// SecretOrConfigMapApplyConfiguration represents a declarative configuration of the SecretOrConfigMap type for use +// with apply. +type SecretOrConfigMapApplyConfiguration struct { + Secret *corev1.SecretKeySelector `json:"secret,omitempty"` + ConfigMap *corev1.ConfigMapKeySelector `json:"configMap,omitempty"` +} + +// SecretOrConfigMapApplyConfiguration constructs a declarative configuration of the SecretOrConfigMap type for use with +// apply. +func SecretOrConfigMap() *SecretOrConfigMapApplyConfiguration { + return &SecretOrConfigMapApplyConfiguration{} +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *SecretOrConfigMapApplyConfiguration) WithSecret(value corev1.SecretKeySelector) *SecretOrConfigMapApplyConfiguration { + b.Secret = &value + return b +} + +// WithConfigMap sets the ConfigMap field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConfigMap field is set to the value of the last call. +func (b *SecretOrConfigMapApplyConfiguration) WithConfigMap(value corev1.ConfigMapKeySelector) *SecretOrConfigMapApplyConfiguration { + b.ConfigMap = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/servicemonitor.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/servicemonitor.go new file mode 100644 index 0000000000..7642e34b97 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/servicemonitor.go @@ -0,0 +1,240 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceMonitorApplyConfiguration represents a declarative configuration of the ServiceMonitor type for use +// with apply. 
+type ServiceMonitorApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ServiceMonitorSpecApplyConfiguration `json:"spec,omitempty"` + Status *ConfigResourceStatusApplyConfiguration `json:"status,omitempty"` +} + +// ServiceMonitor constructs a declarative configuration of the ServiceMonitor type for use with +// apply. +func ServiceMonitor(name, namespace string) *ServiceMonitorApplyConfiguration { + b := &ServiceMonitorApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("ServiceMonitor") + b.WithAPIVersion("monitoring.coreos.com/v1") + return b +} +func (b ServiceMonitorApplyConfiguration) IsApplyConfiguration() {} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ServiceMonitorApplyConfiguration) WithKind(value string) *ServiceMonitorApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ServiceMonitorApplyConfiguration) WithAPIVersion(value string) *ServiceMonitorApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceMonitorApplyConfiguration) WithName(value string) *ServiceMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ServiceMonitorApplyConfiguration) WithGenerateName(value string) *ServiceMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceMonitorApplyConfiguration) WithNamespace(value string) *ServiceMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ServiceMonitorApplyConfiguration) WithUID(value types.UID) *ServiceMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ServiceMonitorApplyConfiguration) WithResourceVersion(value string) *ServiceMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ServiceMonitorApplyConfiguration) WithGeneration(value int64) *ServiceMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ServiceMonitorApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ServiceMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ServiceMonitorApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ServiceMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ServiceMonitorApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ServiceMonitorApplyConfiguration) WithLabels(entries map[string]string) *ServiceMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ServiceMonitorApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ServiceMonitorApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ServiceMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ServiceMonitorApplyConfiguration) WithFinalizers(values ...string) *ServiceMonitorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ServiceMonitorApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ServiceMonitorApplyConfiguration) WithSpec(value *ServiceMonitorSpecApplyConfiguration) *ServiceMonitorApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ServiceMonitorApplyConfiguration) WithStatus(value *ConfigResourceStatusApplyConfiguration) *ServiceMonitorApplyConfiguration { + b.Status = value + return b +} + +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ServiceMonitorApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ServiceMonitorApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceMonitorApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ServiceMonitorApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/servicemonitorspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/servicemonitorspec.go new file mode 100644 index 0000000000..b0b7887644 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/servicemonitorspec.go @@ -0,0 +1,257 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceMonitorSpecApplyConfiguration represents a declarative configuration of the ServiceMonitorSpec type for use +// with apply. 
+type ServiceMonitorSpecApplyConfiguration struct { + JobLabel *string `json:"jobLabel,omitempty"` + TargetLabels []string `json:"targetLabels,omitempty"` + PodTargetLabels []string `json:"podTargetLabels,omitempty"` + Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + SelectorMechanism *monitoringv1.SelectorMechanism `json:"selectorMechanism,omitempty"` + NamespaceSelector *NamespaceSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + ScrapeProtocols []monitoringv1.ScrapeProtocol `json:"scrapeProtocols,omitempty"` + FallbackScrapeProtocol *monitoringv1.ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + TargetLimit *uint64 `json:"targetLimit,omitempty"` + LabelLimit *uint64 `json:"labelLimit,omitempty"` + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + NativeHistogramConfigApplyConfiguration `json:",inline"` + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + AttachMetadata *AttachMetadataApplyConfiguration `json:"attachMetadata,omitempty"` + ScrapeClassName *string `json:"scrapeClass,omitempty"` + BodySizeLimit *monitoringv1.ByteSize `json:"bodySizeLimit,omitempty"` + ServiceDiscoveryRole *monitoringv1.ServiceDiscoveryRole `json:"serviceDiscoveryRole,omitempty"` +} + +// ServiceMonitorSpecApplyConfiguration constructs a declarative configuration of the ServiceMonitorSpec type for use with +// apply. +func ServiceMonitorSpec() *ServiceMonitorSpecApplyConfiguration { + return &ServiceMonitorSpecApplyConfiguration{} +} + +// WithJobLabel sets the JobLabel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the JobLabel field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithJobLabel(value string) *ServiceMonitorSpecApplyConfiguration { + b.JobLabel = &value + return b +} + +// WithTargetLabels adds the given value to the TargetLabels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TargetLabels field. +func (b *ServiceMonitorSpecApplyConfiguration) WithTargetLabels(values ...string) *ServiceMonitorSpecApplyConfiguration { + for i := range values { + b.TargetLabels = append(b.TargetLabels, values[i]) + } + return b +} + +// WithPodTargetLabels adds the given value to the PodTargetLabels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PodTargetLabels field. +func (b *ServiceMonitorSpecApplyConfiguration) WithPodTargetLabels(values ...string) *ServiceMonitorSpecApplyConfiguration { + for i := range values { + b.PodTargetLabels = append(b.PodTargetLabels, values[i]) + } + return b +} + +// WithEndpoints adds the given value to the Endpoints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Endpoints field. 
+func (b *ServiceMonitorSpecApplyConfiguration) WithEndpoints(values ...*EndpointApplyConfiguration) *ServiceMonitorSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithEndpoints") + } + b.Endpoints = append(b.Endpoints, *values[i]) + } + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *ServiceMonitorSpecApplyConfiguration { + b.Selector = value + return b +} + +// WithSelectorMechanism sets the SelectorMechanism field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelectorMechanism field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithSelectorMechanism(value monitoringv1.SelectorMechanism) *ServiceMonitorSpecApplyConfiguration { + b.SelectorMechanism = &value + return b +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithNamespaceSelector(value *NamespaceSelectorApplyConfiguration) *ServiceMonitorSpecApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithSampleLimit sets the SampleLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SampleLimit field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithSampleLimit(value uint64) *ServiceMonitorSpecApplyConfiguration { + b.SampleLimit = &value + return b +} + +// WithScrapeProtocols adds the given value to the ScrapeProtocols field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ScrapeProtocols field. +func (b *ServiceMonitorSpecApplyConfiguration) WithScrapeProtocols(values ...monitoringv1.ScrapeProtocol) *ServiceMonitorSpecApplyConfiguration { + for i := range values { + b.ScrapeProtocols = append(b.ScrapeProtocols, values[i]) + } + return b +} + +// WithFallbackScrapeProtocol sets the FallbackScrapeProtocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FallbackScrapeProtocol field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithFallbackScrapeProtocol(value monitoringv1.ScrapeProtocol) *ServiceMonitorSpecApplyConfiguration { + b.FallbackScrapeProtocol = &value + return b +} + +// WithTargetLimit sets the TargetLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the TargetLimit field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithTargetLimit(value uint64) *ServiceMonitorSpecApplyConfiguration { + b.TargetLimit = &value + return b +} + +// WithLabelLimit sets the LabelLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelLimit field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithLabelLimit(value uint64) *ServiceMonitorSpecApplyConfiguration { + b.LabelLimit = &value + return b +} + +// WithLabelNameLengthLimit sets the LabelNameLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelNameLengthLimit field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithLabelNameLengthLimit(value uint64) *ServiceMonitorSpecApplyConfiguration { + b.LabelNameLengthLimit = &value + return b +} + +// WithLabelValueLengthLimit sets the LabelValueLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelValueLengthLimit field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithLabelValueLengthLimit(value uint64) *ServiceMonitorSpecApplyConfiguration { + b.LabelValueLengthLimit = &value + return b +} + +// WithScrapeNativeHistograms sets the ScrapeNativeHistograms field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeNativeHistograms field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithScrapeNativeHistograms(value bool) *ServiceMonitorSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.ScrapeNativeHistograms = &value + return b +} + +// WithScrapeClassicHistograms sets the ScrapeClassicHistograms field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeClassicHistograms field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithScrapeClassicHistograms(value bool) *ServiceMonitorSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.ScrapeClassicHistograms = &value + return b +} + +// WithNativeHistogramBucketLimit sets the NativeHistogramBucketLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NativeHistogramBucketLimit field is set to the value of the last call. 
+func (b *ServiceMonitorSpecApplyConfiguration) WithNativeHistogramBucketLimit(value uint64) *ServiceMonitorSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.NativeHistogramBucketLimit = &value + return b +} + +// WithNativeHistogramMinBucketFactor sets the NativeHistogramMinBucketFactor field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NativeHistogramMinBucketFactor field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithNativeHistogramMinBucketFactor(value resource.Quantity) *ServiceMonitorSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.NativeHistogramMinBucketFactor = &value + return b +} + +// WithConvertClassicHistogramsToNHCB sets the ConvertClassicHistogramsToNHCB field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConvertClassicHistogramsToNHCB field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithConvertClassicHistogramsToNHCB(value bool) *ServiceMonitorSpecApplyConfiguration { + b.NativeHistogramConfigApplyConfiguration.ConvertClassicHistogramsToNHCB = &value + return b +} + +// WithKeepDroppedTargets sets the KeepDroppedTargets field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeepDroppedTargets field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithKeepDroppedTargets(value uint64) *ServiceMonitorSpecApplyConfiguration { + b.KeepDroppedTargets = &value + return b +} + +// WithAttachMetadata sets the AttachMetadata field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AttachMetadata field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithAttachMetadata(value *AttachMetadataApplyConfiguration) *ServiceMonitorSpecApplyConfiguration { + b.AttachMetadata = value + return b +} + +// WithScrapeClassName sets the ScrapeClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeClassName field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithScrapeClassName(value string) *ServiceMonitorSpecApplyConfiguration { + b.ScrapeClassName = &value + return b +} + +// WithBodySizeLimit sets the BodySizeLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BodySizeLimit field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithBodySizeLimit(value monitoringv1.ByteSize) *ServiceMonitorSpecApplyConfiguration { + b.BodySizeLimit = &value + return b +} + +// WithServiceDiscoveryRole sets the ServiceDiscoveryRole field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ServiceDiscoveryRole field is set to the value of the last call. +func (b *ServiceMonitorSpecApplyConfiguration) WithServiceDiscoveryRole(value monitoringv1.ServiceDiscoveryRole) *ServiceMonitorSpecApplyConfiguration { + b.ServiceDiscoveryRole = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/shardretentionpolicy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/shardretentionpolicy.go new file mode 100644 index 0000000000..f2f0dc22b3 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/shardretentionpolicy.go @@ -0,0 +1,50 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// ShardRetentionPolicyApplyConfiguration represents a declarative configuration of the ShardRetentionPolicy type for use +// with apply. +type ShardRetentionPolicyApplyConfiguration struct { + WhenScaled *monitoringv1.WhenScaledRetentionType `json:"whenScaled,omitempty"` + Retain *RetainConfigApplyConfiguration `json:"retain,omitempty"` +} + +// ShardRetentionPolicyApplyConfiguration constructs a declarative configuration of the ShardRetentionPolicy type for use with +// apply. +func ShardRetentionPolicy() *ShardRetentionPolicyApplyConfiguration { + return &ShardRetentionPolicyApplyConfiguration{} +} + +// WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenScaled field is set to the value of the last call. +func (b *ShardRetentionPolicyApplyConfiguration) WithWhenScaled(value monitoringv1.WhenScaledRetentionType) *ShardRetentionPolicyApplyConfiguration { + b.WhenScaled = &value + return b +} + +// WithRetain sets the Retain field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Retain field is set to the value of the last call. 
+func (b *ShardRetentionPolicyApplyConfiguration) WithRetain(value *RetainConfigApplyConfiguration) *ShardRetentionPolicyApplyConfiguration { + b.Retain = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/shardstatus.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/shardstatus.go new file mode 100644 index 0000000000..dc99ebb0ce --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/shardstatus.go @@ -0,0 +1,73 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ShardStatusApplyConfiguration represents a declarative configuration of the ShardStatus type for use +// with apply. +type ShardStatusApplyConfiguration struct { + ShardID *string `json:"shardID,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"` + AvailableReplicas *int32 `json:"availableReplicas,omitempty"` + UnavailableReplicas *int32 `json:"unavailableReplicas,omitempty"` +} + +// ShardStatusApplyConfiguration constructs a declarative configuration of the ShardStatus type for use with +// apply. +func ShardStatus() *ShardStatusApplyConfiguration { + return &ShardStatusApplyConfiguration{} +} + +// WithShardID sets the ShardID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ShardID field is set to the value of the last call. +func (b *ShardStatusApplyConfiguration) WithShardID(value string) *ShardStatusApplyConfiguration { + b.ShardID = &value + return b +} + +// WithReplicas sets the Replicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Replicas field is set to the value of the last call. +func (b *ShardStatusApplyConfiguration) WithReplicas(value int32) *ShardStatusApplyConfiguration { + b.Replicas = &value + return b +} + +// WithUpdatedReplicas sets the UpdatedReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UpdatedReplicas field is set to the value of the last call. +func (b *ShardStatusApplyConfiguration) WithUpdatedReplicas(value int32) *ShardStatusApplyConfiguration { + b.UpdatedReplicas = &value + return b +} + +// WithAvailableReplicas sets the AvailableReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the AvailableReplicas field is set to the value of the last call. +func (b *ShardStatusApplyConfiguration) WithAvailableReplicas(value int32) *ShardStatusApplyConfiguration { + b.AvailableReplicas = &value + return b +} + +// WithUnavailableReplicas sets the UnavailableReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnavailableReplicas field is set to the value of the last call. +func (b *ShardStatusApplyConfiguration) WithUnavailableReplicas(value int32) *ShardStatusApplyConfiguration { + b.UnavailableReplicas = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/sigv4.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/sigv4.go new file mode 100644 index 0000000000..3a053c5323 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/sigv4.go @@ -0,0 +1,86 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// Sigv4ApplyConfiguration represents a declarative configuration of the Sigv4 type for use +// with apply. +type Sigv4ApplyConfiguration struct { + Region *string `json:"region,omitempty"` + AccessKey *corev1.SecretKeySelector `json:"accessKey,omitempty"` + SecretKey *corev1.SecretKeySelector `json:"secretKey,omitempty"` + Profile *string `json:"profile,omitempty"` + RoleArn *string `json:"roleArn,omitempty"` + UseFIPSSTSEndpoint *bool `json:"useFIPSSTSEndpoint,omitempty"` +} + +// Sigv4ApplyConfiguration constructs a declarative configuration of the Sigv4 type for use with +// apply. +func Sigv4() *Sigv4ApplyConfiguration { + return &Sigv4ApplyConfiguration{} +} + +// WithRegion sets the Region field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Region field is set to the value of the last call. +func (b *Sigv4ApplyConfiguration) WithRegion(value string) *Sigv4ApplyConfiguration { + b.Region = &value + return b +} + +// WithAccessKey sets the AccessKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AccessKey field is set to the value of the last call. 
+func (b *Sigv4ApplyConfiguration) WithAccessKey(value corev1.SecretKeySelector) *Sigv4ApplyConfiguration { + b.AccessKey = &value + return b +} + +// WithSecretKey sets the SecretKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SecretKey field is set to the value of the last call. +func (b *Sigv4ApplyConfiguration) WithSecretKey(value corev1.SecretKeySelector) *Sigv4ApplyConfiguration { + b.SecretKey = &value + return b +} + +// WithProfile sets the Profile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Profile field is set to the value of the last call. +func (b *Sigv4ApplyConfiguration) WithProfile(value string) *Sigv4ApplyConfiguration { + b.Profile = &value + return b +} + +// WithRoleArn sets the RoleArn field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RoleArn field is set to the value of the last call. +func (b *Sigv4ApplyConfiguration) WithRoleArn(value string) *Sigv4ApplyConfiguration { + b.RoleArn = &value + return b +} + +// WithUseFIPSSTSEndpoint sets the UseFIPSSTSEndpoint field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UseFIPSSTSEndpoint field is set to the value of the last call. +func (b *Sigv4ApplyConfiguration) WithUseFIPSSTSEndpoint(value bool) *Sigv4ApplyConfiguration { + b.UseFIPSSTSEndpoint = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/statefulsetupdatestrategy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/statefulsetupdatestrategy.go new file mode 100644 index 0000000000..f5e00d979c --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/statefulsetupdatestrategy.go @@ -0,0 +1,50 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use +// with apply. 
+type StatefulSetUpdateStrategyApplyConfiguration struct { + Type *monitoringv1.StatefulSetUpdateStrategyType `json:"type,omitempty"` + RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` +} + +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with +// apply. +func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { + return &StatefulSetUpdateStrategyApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value monitoringv1.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration { + b.Type = &value + return b +} + +// WithRollingUpdate sets the RollingUpdate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RollingUpdate field is set to the value of the last call. +func (b *StatefulSetUpdateStrategyApplyConfiguration) WithRollingUpdate(value *RollingUpdateStatefulSetStrategyApplyConfiguration) *StatefulSetUpdateStrategyApplyConfiguration { + b.RollingUpdate = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/storagespec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/storagespec.go new file mode 100644 index 0000000000..c215e3a8e8 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/storagespec.go @@ -0,0 +1,68 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// StorageSpecApplyConfiguration represents a declarative configuration of the StorageSpec type for use +// with apply. +type StorageSpecApplyConfiguration struct { + DisableMountSubPath *bool `json:"disableMountSubPath,omitempty"` + EmptyDir *corev1.EmptyDirVolumeSource `json:"emptyDir,omitempty"` + Ephemeral *corev1.EphemeralVolumeSource `json:"ephemeral,omitempty"` + VolumeClaimTemplate *EmbeddedPersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplate,omitempty"` +} + +// StorageSpecApplyConfiguration constructs a declarative configuration of the StorageSpec type for use with +// apply. 
+func StorageSpec() *StorageSpecApplyConfiguration { + return &StorageSpecApplyConfiguration{} +} + +// WithDisableMountSubPath sets the DisableMountSubPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DisableMountSubPath field is set to the value of the last call. +func (b *StorageSpecApplyConfiguration) WithDisableMountSubPath(value bool) *StorageSpecApplyConfiguration { + b.DisableMountSubPath = &value + return b +} + +// WithEmptyDir sets the EmptyDir field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EmptyDir field is set to the value of the last call. +func (b *StorageSpecApplyConfiguration) WithEmptyDir(value corev1.EmptyDirVolumeSource) *StorageSpecApplyConfiguration { + b.EmptyDir = &value + return b +} + +// WithEphemeral sets the Ephemeral field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ephemeral field is set to the value of the last call. +func (b *StorageSpecApplyConfiguration) WithEphemeral(value corev1.EphemeralVolumeSource) *StorageSpecApplyConfiguration { + b.Ephemeral = &value + return b +} + +// WithVolumeClaimTemplate sets the VolumeClaimTemplate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VolumeClaimTemplate field is set to the value of the last call. +func (b *StorageSpecApplyConfiguration) WithVolumeClaimTemplate(value *EmbeddedPersistentVolumeClaimApplyConfiguration) *StorageSpecApplyConfiguration { + b.VolumeClaimTemplate = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosruler.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosruler.go new file mode 100644 index 0000000000..97320820dd --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosruler.go @@ -0,0 +1,240 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ThanosRulerApplyConfiguration represents a declarative configuration of the ThanosRuler type for use +// with apply. 
+type ThanosRulerApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ThanosRulerSpecApplyConfiguration `json:"spec,omitempty"` + Status *ThanosRulerStatusApplyConfiguration `json:"status,omitempty"` +} + +// ThanosRuler constructs a declarative configuration of the ThanosRuler type for use with +// apply. +func ThanosRuler(name, namespace string) *ThanosRulerApplyConfiguration { + b := &ThanosRulerApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("ThanosRuler") + b.WithAPIVersion("monitoring.coreos.com/v1") + return b +} +func (b ThanosRulerApplyConfiguration) IsApplyConfiguration() {} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ThanosRulerApplyConfiguration) WithKind(value string) *ThanosRulerApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ThanosRulerApplyConfiguration) WithAPIVersion(value string) *ThanosRulerApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ThanosRulerApplyConfiguration) WithName(value string) *ThanosRulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ThanosRulerApplyConfiguration) WithGenerateName(value string) *ThanosRulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ThanosRulerApplyConfiguration) WithNamespace(value string) *ThanosRulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ThanosRulerApplyConfiguration) WithUID(value types.UID) *ThanosRulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ThanosRulerApplyConfiguration) WithResourceVersion(value string) *ThanosRulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ThanosRulerApplyConfiguration) WithGeneration(value int64) *ThanosRulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ThanosRulerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ThanosRulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ThanosRulerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ThanosRulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ThanosRulerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ThanosRulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ThanosRulerApplyConfiguration) WithLabels(entries map[string]string) *ThanosRulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ThanosRulerApplyConfiguration) WithAnnotations(entries map[string]string) *ThanosRulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ThanosRulerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ThanosRulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ThanosRulerApplyConfiguration) WithFinalizers(values ...string) *ThanosRulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ThanosRulerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ThanosRulerApplyConfiguration) WithSpec(value *ThanosRulerSpecApplyConfiguration) *ThanosRulerApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ThanosRulerApplyConfiguration) WithStatus(value *ThanosRulerStatusApplyConfiguration) *ThanosRulerApplyConfiguration { + b.Status = value + return b +} + +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ThanosRulerApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ThanosRulerApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ThanosRulerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ThanosRulerApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosrulerspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosrulerspec.go new file mode 100644 index 0000000000..f0608d96ce --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosrulerspec.go @@ -0,0 +1,687 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ThanosRulerSpecApplyConfiguration represents a declarative configuration of the ThanosRulerSpec type for use +// with apply. 
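The generated thanosruler.go and thanosrulerspec.go files above and below expose the usual applyconfiguration-gen builder pattern: each With* method sets a single field and returns the receiver, so a whole object can be declared as one chained expression for server-side apply. A minimal usage sketch, assuming a package alias for the vendored applyconfiguration package and placeholder namespace, replica, endpoint, and label values; only constructors and With* methods present in the generated code are used, and the final apply call through the generated clientset is intentionally not shown:

package example

import (
	monitoringac "github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1"
	corev1 "k8s.io/api/core/v1"
)

// exampleThanosRuler assembles a ThanosRuler apply configuration from the
// generated builders; all concrete values below are illustrative placeholders.
func exampleThanosRuler() *monitoringac.ThanosRulerApplyConfiguration {
	return monitoringac.ThanosRuler("example", "openshift-monitoring").
		WithLabels(map[string]string{"app.kubernetes.io/name": "thanos-ruler"}).
		WithSpec(monitoringac.ThanosRulerSpec().
			WithReplicas(2).
			WithQueryEndpoints("http://thanos-query.openshift-monitoring.svc:9090").
			// StorageSpec and WithEmptyDir come from the generated
			// storagespec.go builders earlier in this diff.
			WithStorage(monitoringac.StorageSpec().
				WithEmptyDir(corev1.EmptyDirVolumeSource{})))
}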
+type ThanosRulerSpecApplyConfiguration struct { + Version *string `json:"version,omitempty"` + PodMetadata *EmbeddedObjectMetadataApplyConfiguration `json:"podMetadata,omitempty"` + Image *string `json:"image,omitempty"` + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + Paused *bool `json:"paused,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + Affinity *corev1.Affinity `json:"affinity,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + DNSPolicy *monitoringv1.DNSPolicy `json:"dnsPolicy,omitempty"` + DNSConfig *PodDNSConfigApplyConfiguration `json:"dnsConfig,omitempty"` + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` + PriorityClassName *string `json:"priorityClassName,omitempty"` + ServiceName *string `json:"serviceName,omitempty"` + ServiceAccountName *string `json:"serviceAccountName,omitempty"` + Storage *StorageSpecApplyConfiguration `json:"storage,omitempty"` + Volumes []corev1.Volume `json:"volumes,omitempty"` + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` + ObjectStorageConfig *corev1.SecretKeySelector `json:"objectStorageConfig,omitempty"` + ObjectStorageConfigFile *string `json:"objectStorageConfigFile,omitempty"` + ListenLocal *bool `json:"listenLocal,omitempty"` + PodManagementPolicy *monitoringv1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` + QueryEndpoints []string `json:"queryEndpoints,omitempty"` + QueryConfig *corev1.SecretKeySelector `json:"queryConfig,omitempty"` + AlertManagersURL []string `json:"alertmanagersUrl,omitempty"` + AlertManagersConfig *corev1.SecretKeySelector `json:"alertmanagersConfig,omitempty"` + RuleSelector *metav1.LabelSelectorApplyConfiguration `json:"ruleSelector,omitempty"` + RuleNamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"ruleNamespaceSelector,omitempty"` + EnforcedNamespaceLabel *string `json:"enforcedNamespaceLabel,omitempty"` + ExcludedFromEnforcement []ObjectReferenceApplyConfiguration `json:"excludedFromEnforcement,omitempty"` + PrometheusRulesExcludedFromEnforce []PrometheusRuleExcludeConfigApplyConfiguration `json:"prometheusRulesExcludedFromEnforce,omitempty"` + LogLevel *string `json:"logLevel,omitempty"` + LogFormat *string `json:"logFormat,omitempty"` + PortName *string `json:"portName,omitempty"` + EvaluationInterval *monitoringv1.Duration `json:"evaluationInterval,omitempty"` + ResendDelay *monitoringv1.Duration `json:"resendDelay,omitempty"` + RuleOutageTolerance *monitoringv1.Duration `json:"ruleOutageTolerance,omitempty"` + RuleQueryOffset *monitoringv1.Duration `json:"ruleQueryOffset,omitempty"` + RuleConcurrentEval *int32 `json:"ruleConcurrentEval,omitempty"` + RuleGracePeriod *monitoringv1.Duration `json:"ruleGracePeriod,omitempty"` + Retention *monitoringv1.Duration `json:"retention,omitempty"` + Containers []corev1.Container `json:"containers,omitempty"` + InitContainers []corev1.Container `json:"initContainers,omitempty"` + TracingConfig *corev1.SecretKeySelector `json:"tracingConfig,omitempty"` 
+ TracingConfigFile *string `json:"tracingConfigFile,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + AlertDropLabels []string `json:"alertDropLabels,omitempty"` + ExternalPrefix *string `json:"externalPrefix,omitempty"` + RoutePrefix *string `json:"routePrefix,omitempty"` + GRPCServerTLSConfig *TLSConfigApplyConfiguration `json:"grpcServerTlsConfig,omitempty"` + AlertQueryURL *string `json:"alertQueryUrl,omitempty"` + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + AlertRelabelConfigs *corev1.SecretKeySelector `json:"alertRelabelConfigs,omitempty"` + AlertRelabelConfigFile *string `json:"alertRelabelConfigFile,omitempty"` + HostAliases []HostAliasApplyConfiguration `json:"hostAliases,omitempty"` + AdditionalArgs []ArgumentApplyConfiguration `json:"additionalArgs,omitempty"` + Web *ThanosRulerWebSpecApplyConfiguration `json:"web,omitempty"` + RemoteWrite []RemoteWriteSpecApplyConfiguration `json:"remoteWrite,omitempty"` + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + EnableFeatures []monitoringv1.EnableFeature `json:"enableFeatures,omitempty"` + HostUsers *bool `json:"hostUsers,omitempty"` +} + +// ThanosRulerSpecApplyConfiguration constructs a declarative configuration of the ThanosRulerSpec type for use with +// apply. +func ThanosRulerSpec() *ThanosRulerSpecApplyConfiguration { + return &ThanosRulerSpecApplyConfiguration{} +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithVersion(value string) *ThanosRulerSpecApplyConfiguration { + b.Version = &value + return b +} + +// WithPodMetadata sets the PodMetadata field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodMetadata field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithPodMetadata(value *EmbeddedObjectMetadataApplyConfiguration) *ThanosRulerSpecApplyConfiguration { + b.PodMetadata = value + return b +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithImage(value string) *ThanosRulerSpecApplyConfiguration { + b.Image = &value + return b +} + +// WithImagePullPolicy sets the ImagePullPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImagePullPolicy field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithImagePullPolicy(value corev1.PullPolicy) *ThanosRulerSpecApplyConfiguration { + b.ImagePullPolicy = &value + return b +} + +// WithImagePullSecrets adds the given value to the ImagePullSecrets field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ImagePullSecrets field. 
+func (b *ThanosRulerSpecApplyConfiguration) WithImagePullSecrets(values ...corev1.LocalObjectReference) *ThanosRulerSpecApplyConfiguration { + for i := range values { + b.ImagePullSecrets = append(b.ImagePullSecrets, values[i]) + } + return b +} + +// WithPaused sets the Paused field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Paused field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithPaused(value bool) *ThanosRulerSpecApplyConfiguration { + b.Paused = &value + return b +} + +// WithReplicas sets the Replicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Replicas field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithReplicas(value int32) *ThanosRulerSpecApplyConfiguration { + b.Replicas = &value + return b +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. +func (b *ThanosRulerSpecApplyConfiguration) WithNodeSelector(entries map[string]string) *ThanosRulerSpecApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithResources(value corev1.ResourceRequirements) *ThanosRulerSpecApplyConfiguration { + b.Resources = &value + return b +} + +// WithAffinity sets the Affinity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Affinity field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithAffinity(value corev1.Affinity) *ThanosRulerSpecApplyConfiguration { + b.Affinity = &value + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *ThanosRulerSpecApplyConfiguration) WithTolerations(values ...corev1.Toleration) *ThanosRulerSpecApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} + +// WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field. 
+func (b *ThanosRulerSpecApplyConfiguration) WithTopologySpreadConstraints(values ...corev1.TopologySpreadConstraint) *ThanosRulerSpecApplyConfiguration { + for i := range values { + b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, values[i]) + } + return b +} + +// WithSecurityContext sets the SecurityContext field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SecurityContext field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithSecurityContext(value corev1.PodSecurityContext) *ThanosRulerSpecApplyConfiguration { + b.SecurityContext = &value + return b +} + +// WithDNSPolicy sets the DNSPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSPolicy field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithDNSPolicy(value monitoringv1.DNSPolicy) *ThanosRulerSpecApplyConfiguration { + b.DNSPolicy = &value + return b +} + +// WithDNSConfig sets the DNSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSConfig field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithDNSConfig(value *PodDNSConfigApplyConfiguration) *ThanosRulerSpecApplyConfiguration { + b.DNSConfig = value + return b +} + +// WithEnableServiceLinks sets the EnableServiceLinks field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableServiceLinks field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithEnableServiceLinks(value bool) *ThanosRulerSpecApplyConfiguration { + b.EnableServiceLinks = &value + return b +} + +// WithPriorityClassName sets the PriorityClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PriorityClassName field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithPriorityClassName(value string) *ThanosRulerSpecApplyConfiguration { + b.PriorityClassName = &value + return b +} + +// WithServiceName sets the ServiceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceName field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithServiceName(value string) *ThanosRulerSpecApplyConfiguration { + b.ServiceName = &value + return b +} + +// WithServiceAccountName sets the ServiceAccountName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceAccountName field is set to the value of the last call. 
+func (b *ThanosRulerSpecApplyConfiguration) WithServiceAccountName(value string) *ThanosRulerSpecApplyConfiguration { + b.ServiceAccountName = &value + return b +} + +// WithStorage sets the Storage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Storage field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithStorage(value *StorageSpecApplyConfiguration) *ThanosRulerSpecApplyConfiguration { + b.Storage = value + return b +} + +// WithVolumes adds the given value to the Volumes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Volumes field. +func (b *ThanosRulerSpecApplyConfiguration) WithVolumes(values ...corev1.Volume) *ThanosRulerSpecApplyConfiguration { + for i := range values { + b.Volumes = append(b.Volumes, values[i]) + } + return b +} + +// WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the VolumeMounts field. +func (b *ThanosRulerSpecApplyConfiguration) WithVolumeMounts(values ...corev1.VolumeMount) *ThanosRulerSpecApplyConfiguration { + for i := range values { + b.VolumeMounts = append(b.VolumeMounts, values[i]) + } + return b +} + +// WithObjectStorageConfig sets the ObjectStorageConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObjectStorageConfig field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithObjectStorageConfig(value corev1.SecretKeySelector) *ThanosRulerSpecApplyConfiguration { + b.ObjectStorageConfig = &value + return b +} + +// WithObjectStorageConfigFile sets the ObjectStorageConfigFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObjectStorageConfigFile field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithObjectStorageConfigFile(value string) *ThanosRulerSpecApplyConfiguration { + b.ObjectStorageConfigFile = &value + return b +} + +// WithListenLocal sets the ListenLocal field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ListenLocal field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithListenLocal(value bool) *ThanosRulerSpecApplyConfiguration { + b.ListenLocal = &value + return b +} + +// WithPodManagementPolicy sets the PodManagementPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodManagementPolicy field is set to the value of the last call. 
+func (b *ThanosRulerSpecApplyConfiguration) WithPodManagementPolicy(value monitoringv1.PodManagementPolicyType) *ThanosRulerSpecApplyConfiguration { + b.PodManagementPolicy = &value + return b +} + +// WithUpdateStrategy sets the UpdateStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UpdateStrategy field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithUpdateStrategy(value *StatefulSetUpdateStrategyApplyConfiguration) *ThanosRulerSpecApplyConfiguration { + b.UpdateStrategy = value + return b +} + +// WithQueryEndpoints adds the given value to the QueryEndpoints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the QueryEndpoints field. +func (b *ThanosRulerSpecApplyConfiguration) WithQueryEndpoints(values ...string) *ThanosRulerSpecApplyConfiguration { + for i := range values { + b.QueryEndpoints = append(b.QueryEndpoints, values[i]) + } + return b +} + +// WithQueryConfig sets the QueryConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the QueryConfig field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithQueryConfig(value corev1.SecretKeySelector) *ThanosRulerSpecApplyConfiguration { + b.QueryConfig = &value + return b +} + +// WithAlertManagersURL adds the given value to the AlertManagersURL field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AlertManagersURL field. +func (b *ThanosRulerSpecApplyConfiguration) WithAlertManagersURL(values ...string) *ThanosRulerSpecApplyConfiguration { + for i := range values { + b.AlertManagersURL = append(b.AlertManagersURL, values[i]) + } + return b +} + +// WithAlertManagersConfig sets the AlertManagersConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AlertManagersConfig field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithAlertManagersConfig(value corev1.SecretKeySelector) *ThanosRulerSpecApplyConfiguration { + b.AlertManagersConfig = &value + return b +} + +// WithRuleSelector sets the RuleSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RuleSelector field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithRuleSelector(value *metav1.LabelSelectorApplyConfiguration) *ThanosRulerSpecApplyConfiguration { + b.RuleSelector = value + return b +} + +// WithRuleNamespaceSelector sets the RuleNamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RuleNamespaceSelector field is set to the value of the last call. 
+func (b *ThanosRulerSpecApplyConfiguration) WithRuleNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *ThanosRulerSpecApplyConfiguration { + b.RuleNamespaceSelector = value + return b +} + +// WithEnforcedNamespaceLabel sets the EnforcedNamespaceLabel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnforcedNamespaceLabel field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithEnforcedNamespaceLabel(value string) *ThanosRulerSpecApplyConfiguration { + b.EnforcedNamespaceLabel = &value + return b +} + +// WithExcludedFromEnforcement adds the given value to the ExcludedFromEnforcement field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExcludedFromEnforcement field. +func (b *ThanosRulerSpecApplyConfiguration) WithExcludedFromEnforcement(values ...*ObjectReferenceApplyConfiguration) *ThanosRulerSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExcludedFromEnforcement") + } + b.ExcludedFromEnforcement = append(b.ExcludedFromEnforcement, *values[i]) + } + return b +} + +// WithPrometheusRulesExcludedFromEnforce adds the given value to the PrometheusRulesExcludedFromEnforce field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PrometheusRulesExcludedFromEnforce field. +func (b *ThanosRulerSpecApplyConfiguration) WithPrometheusRulesExcludedFromEnforce(values ...*PrometheusRuleExcludeConfigApplyConfiguration) *ThanosRulerSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPrometheusRulesExcludedFromEnforce") + } + b.PrometheusRulesExcludedFromEnforce = append(b.PrometheusRulesExcludedFromEnforce, *values[i]) + } + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithLogLevel(value string) *ThanosRulerSpecApplyConfiguration { + b.LogLevel = &value + return b +} + +// WithLogFormat sets the LogFormat field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogFormat field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithLogFormat(value string) *ThanosRulerSpecApplyConfiguration { + b.LogFormat = &value + return b +} + +// WithPortName sets the PortName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PortName field is set to the value of the last call. 
+func (b *ThanosRulerSpecApplyConfiguration) WithPortName(value string) *ThanosRulerSpecApplyConfiguration { + b.PortName = &value + return b +} + +// WithEvaluationInterval sets the EvaluationInterval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EvaluationInterval field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithEvaluationInterval(value monitoringv1.Duration) *ThanosRulerSpecApplyConfiguration { + b.EvaluationInterval = &value + return b +} + +// WithResendDelay sets the ResendDelay field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResendDelay field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithResendDelay(value monitoringv1.Duration) *ThanosRulerSpecApplyConfiguration { + b.ResendDelay = &value + return b +} + +// WithRuleOutageTolerance sets the RuleOutageTolerance field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RuleOutageTolerance field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithRuleOutageTolerance(value monitoringv1.Duration) *ThanosRulerSpecApplyConfiguration { + b.RuleOutageTolerance = &value + return b +} + +// WithRuleQueryOffset sets the RuleQueryOffset field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RuleQueryOffset field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithRuleQueryOffset(value monitoringv1.Duration) *ThanosRulerSpecApplyConfiguration { + b.RuleQueryOffset = &value + return b +} + +// WithRuleConcurrentEval sets the RuleConcurrentEval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RuleConcurrentEval field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithRuleConcurrentEval(value int32) *ThanosRulerSpecApplyConfiguration { + b.RuleConcurrentEval = &value + return b +} + +// WithRuleGracePeriod sets the RuleGracePeriod field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RuleGracePeriod field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithRuleGracePeriod(value monitoringv1.Duration) *ThanosRulerSpecApplyConfiguration { + b.RuleGracePeriod = &value + return b +} + +// WithRetention sets the Retention field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Retention field is set to the value of the last call. 
+func (b *ThanosRulerSpecApplyConfiguration) WithRetention(value monitoringv1.Duration) *ThanosRulerSpecApplyConfiguration { + b.Retention = &value + return b +} + +// WithContainers adds the given value to the Containers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Containers field. +func (b *ThanosRulerSpecApplyConfiguration) WithContainers(values ...corev1.Container) *ThanosRulerSpecApplyConfiguration { + for i := range values { + b.Containers = append(b.Containers, values[i]) + } + return b +} + +// WithInitContainers adds the given value to the InitContainers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the InitContainers field. +func (b *ThanosRulerSpecApplyConfiguration) WithInitContainers(values ...corev1.Container) *ThanosRulerSpecApplyConfiguration { + for i := range values { + b.InitContainers = append(b.InitContainers, values[i]) + } + return b +} + +// WithTracingConfig sets the TracingConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TracingConfig field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithTracingConfig(value corev1.SecretKeySelector) *ThanosRulerSpecApplyConfiguration { + b.TracingConfig = &value + return b +} + +// WithTracingConfigFile sets the TracingConfigFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TracingConfigFile field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithTracingConfigFile(value string) *ThanosRulerSpecApplyConfiguration { + b.TracingConfigFile = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ThanosRulerSpecApplyConfiguration) WithLabels(entries map[string]string) *ThanosRulerSpecApplyConfiguration { + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAlertDropLabels adds the given value to the AlertDropLabels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AlertDropLabels field. +func (b *ThanosRulerSpecApplyConfiguration) WithAlertDropLabels(values ...string) *ThanosRulerSpecApplyConfiguration { + for i := range values { + b.AlertDropLabels = append(b.AlertDropLabels, values[i]) + } + return b +} + +// WithExternalPrefix sets the ExternalPrefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ExternalPrefix field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithExternalPrefix(value string) *ThanosRulerSpecApplyConfiguration { + b.ExternalPrefix = &value + return b +} + +// WithRoutePrefix sets the RoutePrefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RoutePrefix field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithRoutePrefix(value string) *ThanosRulerSpecApplyConfiguration { + b.RoutePrefix = &value + return b +} + +// WithGRPCServerTLSConfig sets the GRPCServerTLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GRPCServerTLSConfig field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithGRPCServerTLSConfig(value *TLSConfigApplyConfiguration) *ThanosRulerSpecApplyConfiguration { + b.GRPCServerTLSConfig = value + return b +} + +// WithAlertQueryURL sets the AlertQueryURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AlertQueryURL field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithAlertQueryURL(value string) *ThanosRulerSpecApplyConfiguration { + b.AlertQueryURL = &value + return b +} + +// WithMinReadySeconds sets the MinReadySeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinReadySeconds field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithMinReadySeconds(value int32) *ThanosRulerSpecApplyConfiguration { + b.MinReadySeconds = &value + return b +} + +// WithAlertRelabelConfigs sets the AlertRelabelConfigs field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AlertRelabelConfigs field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithAlertRelabelConfigs(value corev1.SecretKeySelector) *ThanosRulerSpecApplyConfiguration { + b.AlertRelabelConfigs = &value + return b +} + +// WithAlertRelabelConfigFile sets the AlertRelabelConfigFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AlertRelabelConfigFile field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithAlertRelabelConfigFile(value string) *ThanosRulerSpecApplyConfiguration { + b.AlertRelabelConfigFile = &value + return b +} + +// WithHostAliases adds the given value to the HostAliases field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the HostAliases field. 
+func (b *ThanosRulerSpecApplyConfiguration) WithHostAliases(values ...*HostAliasApplyConfiguration) *ThanosRulerSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithHostAliases") + } + b.HostAliases = append(b.HostAliases, *values[i]) + } + return b +} + +// WithAdditionalArgs adds the given value to the AdditionalArgs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AdditionalArgs field. +func (b *ThanosRulerSpecApplyConfiguration) WithAdditionalArgs(values ...*ArgumentApplyConfiguration) *ThanosRulerSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAdditionalArgs") + } + b.AdditionalArgs = append(b.AdditionalArgs, *values[i]) + } + return b +} + +// WithWeb sets the Web field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Web field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithWeb(value *ThanosRulerWebSpecApplyConfiguration) *ThanosRulerSpecApplyConfiguration { + b.Web = value + return b +} + +// WithRemoteWrite adds the given value to the RemoteWrite field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RemoteWrite field. +func (b *ThanosRulerSpecApplyConfiguration) WithRemoteWrite(values ...*RemoteWriteSpecApplyConfiguration) *ThanosRulerSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRemoteWrite") + } + b.RemoteWrite = append(b.RemoteWrite, *values[i]) + } + return b +} + +// WithTerminationGracePeriodSeconds sets the TerminationGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TerminationGracePeriodSeconds field is set to the value of the last call. +func (b *ThanosRulerSpecApplyConfiguration) WithTerminationGracePeriodSeconds(value int64) *ThanosRulerSpecApplyConfiguration { + b.TerminationGracePeriodSeconds = &value + return b +} + +// WithEnableFeatures adds the given value to the EnableFeatures field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the EnableFeatures field. +func (b *ThanosRulerSpecApplyConfiguration) WithEnableFeatures(values ...monitoringv1.EnableFeature) *ThanosRulerSpecApplyConfiguration { + for i := range values { + b.EnableFeatures = append(b.EnableFeatures, values[i]) + } + return b +} + +// WithHostUsers sets the HostUsers field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostUsers field is set to the value of the last call. 
+func (b *ThanosRulerSpecApplyConfiguration) WithHostUsers(value bool) *ThanosRulerSpecApplyConfiguration { + b.HostUsers = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosrulerstatus.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosrulerstatus.go new file mode 100644 index 0000000000..e37031fb92 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosrulerstatus.go @@ -0,0 +1,87 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ThanosRulerStatusApplyConfiguration represents a declarative configuration of the ThanosRulerStatus type for use +// with apply. +type ThanosRulerStatusApplyConfiguration struct { + Paused *bool `json:"paused,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"` + AvailableReplicas *int32 `json:"availableReplicas,omitempty"` + UnavailableReplicas *int32 `json:"unavailableReplicas,omitempty"` + Conditions []ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ThanosRulerStatusApplyConfiguration constructs a declarative configuration of the ThanosRulerStatus type for use with +// apply. +func ThanosRulerStatus() *ThanosRulerStatusApplyConfiguration { + return &ThanosRulerStatusApplyConfiguration{} +} + +// WithPaused sets the Paused field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Paused field is set to the value of the last call. +func (b *ThanosRulerStatusApplyConfiguration) WithPaused(value bool) *ThanosRulerStatusApplyConfiguration { + b.Paused = &value + return b +} + +// WithReplicas sets the Replicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Replicas field is set to the value of the last call. +func (b *ThanosRulerStatusApplyConfiguration) WithReplicas(value int32) *ThanosRulerStatusApplyConfiguration { + b.Replicas = &value + return b +} + +// WithUpdatedReplicas sets the UpdatedReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UpdatedReplicas field is set to the value of the last call. 
+func (b *ThanosRulerStatusApplyConfiguration) WithUpdatedReplicas(value int32) *ThanosRulerStatusApplyConfiguration { + b.UpdatedReplicas = &value + return b +} + +// WithAvailableReplicas sets the AvailableReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AvailableReplicas field is set to the value of the last call. +func (b *ThanosRulerStatusApplyConfiguration) WithAvailableReplicas(value int32) *ThanosRulerStatusApplyConfiguration { + b.AvailableReplicas = &value + return b +} + +// WithUnavailableReplicas sets the UnavailableReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnavailableReplicas field is set to the value of the last call. +func (b *ThanosRulerStatusApplyConfiguration) WithUnavailableReplicas(value int32) *ThanosRulerStatusApplyConfiguration { + b.UnavailableReplicas = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ThanosRulerStatusApplyConfiguration) WithConditions(values ...*ConditionApplyConfiguration) *ThanosRulerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosrulerwebspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosrulerwebspec.go new file mode 100644 index 0000000000..098b2fd709 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosrulerwebspec.go @@ -0,0 +1,45 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ThanosRulerWebSpecApplyConfiguration represents a declarative configuration of the ThanosRulerWebSpec type for use +// with apply. +type ThanosRulerWebSpecApplyConfiguration struct { + WebConfigFileFieldsApplyConfiguration `json:",inline"` +} + +// ThanosRulerWebSpecApplyConfiguration constructs a declarative configuration of the ThanosRulerWebSpec type for use with +// apply. 
+func ThanosRulerWebSpec() *ThanosRulerWebSpecApplyConfiguration { + return &ThanosRulerWebSpecApplyConfiguration{} +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *ThanosRulerWebSpecApplyConfiguration) WithTLSConfig(value *WebTLSConfigApplyConfiguration) *ThanosRulerWebSpecApplyConfiguration { + b.WebConfigFileFieldsApplyConfiguration.TLSConfig = value + return b +} + +// WithHTTPConfig sets the HTTPConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPConfig field is set to the value of the last call. +func (b *ThanosRulerWebSpecApplyConfiguration) WithHTTPConfig(value *WebHTTPConfigApplyConfiguration) *ThanosRulerWebSpecApplyConfiguration { + b.WebConfigFileFieldsApplyConfiguration.HTTPConfig = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosspec.go new file mode 100644 index 0000000000..af6df2c12d --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/thanosspec.go @@ -0,0 +1,247 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" +) + +// ThanosSpecApplyConfiguration represents a declarative configuration of the ThanosSpec type for use +// with apply. 
+type ThanosSpecApplyConfiguration struct { + Image *string `json:"image,omitempty"` + Version *string `json:"version,omitempty"` + Tag *string `json:"tag,omitempty"` + SHA *string `json:"sha,omitempty"` + BaseImage *string `json:"baseImage,omitempty"` + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + ObjectStorageConfig *corev1.SecretKeySelector `json:"objectStorageConfig,omitempty"` + ObjectStorageConfigFile *string `json:"objectStorageConfigFile,omitempty"` + ListenLocal *bool `json:"listenLocal,omitempty"` + GRPCListenLocal *bool `json:"grpcListenLocal,omitempty"` + HTTPListenLocal *bool `json:"httpListenLocal,omitempty"` + TracingConfig *corev1.SecretKeySelector `json:"tracingConfig,omitempty"` + TracingConfigFile *string `json:"tracingConfigFile,omitempty"` + GRPCServerTLSConfig *TLSConfigApplyConfiguration `json:"grpcServerTlsConfig,omitempty"` + LogLevel *string `json:"logLevel,omitempty"` + LogFormat *string `json:"logFormat,omitempty"` + MinTime *string `json:"minTime,omitempty"` + BlockDuration *monitoringv1.Duration `json:"blockSize,omitempty"` + ReadyTimeout *monitoringv1.Duration `json:"readyTimeout,omitempty"` + GetConfigInterval *monitoringv1.Duration `json:"getConfigInterval,omitempty"` + GetConfigTimeout *monitoringv1.Duration `json:"getConfigTimeout,omitempty"` + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` + AdditionalArgs []ArgumentApplyConfiguration `json:"additionalArgs,omitempty"` +} + +// ThanosSpecApplyConfiguration constructs a declarative configuration of the ThanosSpec type for use with +// apply. +func ThanosSpec() *ThanosSpecApplyConfiguration { + return &ThanosSpecApplyConfiguration{} +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithImage(value string) *ThanosSpecApplyConfiguration { + b.Image = &value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithVersion(value string) *ThanosSpecApplyConfiguration { + b.Version = &value + return b +} + +// WithTag sets the Tag field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Tag field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithTag(value string) *ThanosSpecApplyConfiguration { + b.Tag = &value + return b +} + +// WithSHA sets the SHA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SHA field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithSHA(value string) *ThanosSpecApplyConfiguration { + b.SHA = &value + return b +} + +// WithBaseImage sets the BaseImage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the BaseImage field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithBaseImage(value string) *ThanosSpecApplyConfiguration { + b.BaseImage = &value + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithResources(value corev1.ResourceRequirements) *ThanosSpecApplyConfiguration { + b.Resources = &value + return b +} + +// WithObjectStorageConfig sets the ObjectStorageConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObjectStorageConfig field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithObjectStorageConfig(value corev1.SecretKeySelector) *ThanosSpecApplyConfiguration { + b.ObjectStorageConfig = &value + return b +} + +// WithObjectStorageConfigFile sets the ObjectStorageConfigFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObjectStorageConfigFile field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithObjectStorageConfigFile(value string) *ThanosSpecApplyConfiguration { + b.ObjectStorageConfigFile = &value + return b +} + +// WithListenLocal sets the ListenLocal field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ListenLocal field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithListenLocal(value bool) *ThanosSpecApplyConfiguration { + b.ListenLocal = &value + return b +} + +// WithGRPCListenLocal sets the GRPCListenLocal field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GRPCListenLocal field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithGRPCListenLocal(value bool) *ThanosSpecApplyConfiguration { + b.GRPCListenLocal = &value + return b +} + +// WithHTTPListenLocal sets the HTTPListenLocal field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPListenLocal field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithHTTPListenLocal(value bool) *ThanosSpecApplyConfiguration { + b.HTTPListenLocal = &value + return b +} + +// WithTracingConfig sets the TracingConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TracingConfig field is set to the value of the last call. 
+func (b *ThanosSpecApplyConfiguration) WithTracingConfig(value corev1.SecretKeySelector) *ThanosSpecApplyConfiguration { + b.TracingConfig = &value + return b +} + +// WithTracingConfigFile sets the TracingConfigFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TracingConfigFile field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithTracingConfigFile(value string) *ThanosSpecApplyConfiguration { + b.TracingConfigFile = &value + return b +} + +// WithGRPCServerTLSConfig sets the GRPCServerTLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GRPCServerTLSConfig field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithGRPCServerTLSConfig(value *TLSConfigApplyConfiguration) *ThanosSpecApplyConfiguration { + b.GRPCServerTLSConfig = value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithLogLevel(value string) *ThanosSpecApplyConfiguration { + b.LogLevel = &value + return b +} + +// WithLogFormat sets the LogFormat field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogFormat field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithLogFormat(value string) *ThanosSpecApplyConfiguration { + b.LogFormat = &value + return b +} + +// WithMinTime sets the MinTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinTime field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithMinTime(value string) *ThanosSpecApplyConfiguration { + b.MinTime = &value + return b +} + +// WithBlockDuration sets the BlockDuration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BlockDuration field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithBlockDuration(value monitoringv1.Duration) *ThanosSpecApplyConfiguration { + b.BlockDuration = &value + return b +} + +// WithReadyTimeout sets the ReadyTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyTimeout field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithReadyTimeout(value monitoringv1.Duration) *ThanosSpecApplyConfiguration { + b.ReadyTimeout = &value + return b +} + +// WithGetConfigInterval sets the GetConfigInterval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the GetConfigInterval field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithGetConfigInterval(value monitoringv1.Duration) *ThanosSpecApplyConfiguration { + b.GetConfigInterval = &value + return b +} + +// WithGetConfigTimeout sets the GetConfigTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GetConfigTimeout field is set to the value of the last call. +func (b *ThanosSpecApplyConfiguration) WithGetConfigTimeout(value monitoringv1.Duration) *ThanosSpecApplyConfiguration { + b.GetConfigTimeout = &value + return b +} + +// WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the VolumeMounts field. +func (b *ThanosSpecApplyConfiguration) WithVolumeMounts(values ...corev1.VolumeMount) *ThanosSpecApplyConfiguration { + for i := range values { + b.VolumeMounts = append(b.VolumeMounts, values[i]) + } + return b +} + +// WithAdditionalArgs adds the given value to the AdditionalArgs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AdditionalArgs field. +func (b *ThanosSpecApplyConfiguration) WithAdditionalArgs(values ...*ArgumentApplyConfiguration) *ThanosSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAdditionalArgs") + } + b.AdditionalArgs = append(b.AdditionalArgs, *values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/tlsconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/tlsconfig.go new file mode 100644 index 0000000000..d18d96de21 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/tlsconfig.go @@ -0,0 +1,115 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" +) + +// TLSConfigApplyConfiguration represents a declarative configuration of the TLSConfig type for use +// with apply. +type TLSConfigApplyConfiguration struct { + SafeTLSConfigApplyConfiguration `json:",inline"` + TLSFilesConfigApplyConfiguration `json:",inline"` +} + +// TLSConfigApplyConfiguration constructs a declarative configuration of the TLSConfig type for use with +// apply. 
+func TLSConfig() *TLSConfigApplyConfiguration { + return &TLSConfigApplyConfiguration{} +} + +// WithCA sets the CA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CA field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithCA(value *SecretOrConfigMapApplyConfiguration) *TLSConfigApplyConfiguration { + b.SafeTLSConfigApplyConfiguration.CA = value + return b +} + +// WithCert sets the Cert field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Cert field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithCert(value *SecretOrConfigMapApplyConfiguration) *TLSConfigApplyConfiguration { + b.SafeTLSConfigApplyConfiguration.Cert = value + return b +} + +// WithKeySecret sets the KeySecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeySecret field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithKeySecret(value corev1.SecretKeySelector) *TLSConfigApplyConfiguration { + b.SafeTLSConfigApplyConfiguration.KeySecret = &value + return b +} + +// WithServerName sets the ServerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServerName field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithServerName(value string) *TLSConfigApplyConfiguration { + b.SafeTLSConfigApplyConfiguration.ServerName = &value + return b +} + +// WithInsecureSkipVerify sets the InsecureSkipVerify field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InsecureSkipVerify field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithInsecureSkipVerify(value bool) *TLSConfigApplyConfiguration { + b.SafeTLSConfigApplyConfiguration.InsecureSkipVerify = &value + return b +} + +// WithMinVersion sets the MinVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinVersion field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithMinVersion(value monitoringv1.TLSVersion) *TLSConfigApplyConfiguration { + b.SafeTLSConfigApplyConfiguration.MinVersion = &value + return b +} + +// WithMaxVersion sets the MaxVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxVersion field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithMaxVersion(value monitoringv1.TLSVersion) *TLSConfigApplyConfiguration { + b.SafeTLSConfigApplyConfiguration.MaxVersion = &value + return b +} + +// WithCAFile sets the CAFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the CAFile field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithCAFile(value string) *TLSConfigApplyConfiguration { + b.TLSFilesConfigApplyConfiguration.CAFile = &value + return b +} + +// WithCertFile sets the CertFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CertFile field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithCertFile(value string) *TLSConfigApplyConfiguration { + b.TLSFilesConfigApplyConfiguration.CertFile = &value + return b +} + +// WithKeyFile sets the KeyFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeyFile field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithKeyFile(value string) *TLSConfigApplyConfiguration { + b.TLSFilesConfigApplyConfiguration.KeyFile = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/tlsfilesconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/tlsfilesconfig.go new file mode 100644 index 0000000000..2458170495 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/tlsfilesconfig.go @@ -0,0 +1,55 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// TLSFilesConfigApplyConfiguration represents a declarative configuration of the TLSFilesConfig type for use +// with apply. +type TLSFilesConfigApplyConfiguration struct { + CAFile *string `json:"caFile,omitempty"` + CertFile *string `json:"certFile,omitempty"` + KeyFile *string `json:"keyFile,omitempty"` +} + +// TLSFilesConfigApplyConfiguration constructs a declarative configuration of the TLSFilesConfig type for use with +// apply. +func TLSFilesConfig() *TLSFilesConfigApplyConfiguration { + return &TLSFilesConfigApplyConfiguration{} +} + +// WithCAFile sets the CAFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CAFile field is set to the value of the last call. +func (b *TLSFilesConfigApplyConfiguration) WithCAFile(value string) *TLSFilesConfigApplyConfiguration { + b.CAFile = &value + return b +} + +// WithCertFile sets the CertFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CertFile field is set to the value of the last call. 
+func (b *TLSFilesConfigApplyConfiguration) WithCertFile(value string) *TLSFilesConfigApplyConfiguration { + b.CertFile = &value + return b +} + +// WithKeyFile sets the KeyFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeyFile field is set to the value of the last call. +func (b *TLSFilesConfigApplyConfiguration) WithKeyFile(value string) *TLSFilesConfigApplyConfiguration { + b.KeyFile = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/topologyspreadconstraint.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/topologyspreadconstraint.go new file mode 100644 index 0000000000..6c5944a1c2 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/topologyspreadconstraint.go @@ -0,0 +1,110 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// TopologySpreadConstraintApplyConfiguration represents a declarative configuration of the TopologySpreadConstraint type for use +// with apply. +type TopologySpreadConstraintApplyConfiguration struct { + CoreV1TopologySpreadConstraintApplyConfiguration `json:",inline"` + AdditionalLabelSelectors *monitoringv1.AdditionalLabelSelectors `json:"additionalLabelSelectors,omitempty"` +} + +// TopologySpreadConstraintApplyConfiguration constructs a declarative configuration of the TopologySpreadConstraint type for use with +// apply. +func TopologySpreadConstraint() *TopologySpreadConstraintApplyConfiguration { + return &TopologySpreadConstraintApplyConfiguration{} +} + +// WithMaxSkew sets the MaxSkew field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxSkew field is set to the value of the last call. +func (b *TopologySpreadConstraintApplyConfiguration) WithMaxSkew(value int32) *TopologySpreadConstraintApplyConfiguration { + b.CoreV1TopologySpreadConstraintApplyConfiguration.MaxSkew = &value + return b +} + +// WithTopologyKey sets the TopologyKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TopologyKey field is set to the value of the last call. 
+func (b *TopologySpreadConstraintApplyConfiguration) WithTopologyKey(value string) *TopologySpreadConstraintApplyConfiguration { + b.CoreV1TopologySpreadConstraintApplyConfiguration.TopologyKey = &value + return b +} + +// WithWhenUnsatisfiable sets the WhenUnsatisfiable field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenUnsatisfiable field is set to the value of the last call. +func (b *TopologySpreadConstraintApplyConfiguration) WithWhenUnsatisfiable(value corev1.UnsatisfiableConstraintAction) *TopologySpreadConstraintApplyConfiguration { + b.CoreV1TopologySpreadConstraintApplyConfiguration.WhenUnsatisfiable = &value + return b +} + +// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelSelector field is set to the value of the last call. +func (b *TopologySpreadConstraintApplyConfiguration) WithLabelSelector(value *metav1.LabelSelectorApplyConfiguration) *TopologySpreadConstraintApplyConfiguration { + b.CoreV1TopologySpreadConstraintApplyConfiguration.LabelSelector = value + return b +} + +// WithMinDomains sets the MinDomains field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinDomains field is set to the value of the last call. +func (b *TopologySpreadConstraintApplyConfiguration) WithMinDomains(value int32) *TopologySpreadConstraintApplyConfiguration { + b.CoreV1TopologySpreadConstraintApplyConfiguration.MinDomains = &value + return b +} + +// WithNodeAffinityPolicy sets the NodeAffinityPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeAffinityPolicy field is set to the value of the last call. +func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value corev1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { + b.CoreV1TopologySpreadConstraintApplyConfiguration.NodeAffinityPolicy = &value + return b +} + +// WithNodeTaintsPolicy sets the NodeTaintsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeTaintsPolicy field is set to the value of the last call. +func (b *TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value corev1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { + b.CoreV1TopologySpreadConstraintApplyConfiguration.NodeTaintsPolicy = &value + return b +} + +// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field. 
+func (b *TopologySpreadConstraintApplyConfiguration) WithMatchLabelKeys(values ...string) *TopologySpreadConstraintApplyConfiguration { + for i := range values { + b.CoreV1TopologySpreadConstraintApplyConfiguration.MatchLabelKeys = append(b.CoreV1TopologySpreadConstraintApplyConfiguration.MatchLabelKeys, values[i]) + } + return b +} + +// WithAdditionalLabelSelectors sets the AdditionalLabelSelectors field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdditionalLabelSelectors field is set to the value of the last call. +func (b *TopologySpreadConstraintApplyConfiguration) WithAdditionalLabelSelectors(value monitoringv1.AdditionalLabelSelectors) *TopologySpreadConstraintApplyConfiguration { + b.AdditionalLabelSelectors = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/tracingconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/tracingconfig.go new file mode 100644 index 0000000000..a75c8f1884 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/tracingconfig.go @@ -0,0 +1,111 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// TracingConfigApplyConfiguration represents a declarative configuration of the TracingConfig type for use +// with apply. +type TracingConfigApplyConfiguration struct { + ClientType *string `json:"clientType,omitempty"` + Endpoint *string `json:"endpoint,omitempty"` + SamplingFraction *resource.Quantity `json:"samplingFraction,omitempty"` + Insecure *bool `json:"insecure,omitempty"` + Headers map[string]string `json:"headers,omitempty"` + Compression *string `json:"compression,omitempty"` + Timeout *monitoringv1.Duration `json:"timeout,omitempty"` + TLSConfig *TLSConfigApplyConfiguration `json:"tlsConfig,omitempty"` +} + +// TracingConfigApplyConfiguration constructs a declarative configuration of the TracingConfig type for use with +// apply. +func TracingConfig() *TracingConfigApplyConfiguration { + return &TracingConfigApplyConfiguration{} +} + +// WithClientType sets the ClientType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientType field is set to the value of the last call. 
+func (b *TracingConfigApplyConfiguration) WithClientType(value string) *TracingConfigApplyConfiguration { + b.ClientType = &value + return b +} + +// WithEndpoint sets the Endpoint field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Endpoint field is set to the value of the last call. +func (b *TracingConfigApplyConfiguration) WithEndpoint(value string) *TracingConfigApplyConfiguration { + b.Endpoint = &value + return b +} + +// WithSamplingFraction sets the SamplingFraction field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SamplingFraction field is set to the value of the last call. +func (b *TracingConfigApplyConfiguration) WithSamplingFraction(value resource.Quantity) *TracingConfigApplyConfiguration { + b.SamplingFraction = &value + return b +} + +// WithInsecure sets the Insecure field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Insecure field is set to the value of the last call. +func (b *TracingConfigApplyConfiguration) WithInsecure(value bool) *TracingConfigApplyConfiguration { + b.Insecure = &value + return b +} + +// WithHeaders puts the entries into the Headers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Headers field, +// overwriting an existing map entries in Headers field with the same key. +func (b *TracingConfigApplyConfiguration) WithHeaders(entries map[string]string) *TracingConfigApplyConfiguration { + if b.Headers == nil && len(entries) > 0 { + b.Headers = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Headers[k] = v + } + return b +} + +// WithCompression sets the Compression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Compression field is set to the value of the last call. +func (b *TracingConfigApplyConfiguration) WithCompression(value string) *TracingConfigApplyConfiguration { + b.Compression = &value + return b +} + +// WithTimeout sets the Timeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Timeout field is set to the value of the last call. +func (b *TracingConfigApplyConfiguration) WithTimeout(value monitoringv1.Duration) *TracingConfigApplyConfiguration { + b.Timeout = &value + return b +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. 
+func (b *TracingConfigApplyConfiguration) WithTLSConfig(value *TLSConfigApplyConfiguration) *TracingConfigApplyConfiguration { + b.TLSConfig = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/tsdbspec.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/tsdbspec.go new file mode 100644 index 0000000000..2479235911 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/tsdbspec.go @@ -0,0 +1,41 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +// TSDBSpecApplyConfiguration represents a declarative configuration of the TSDBSpec type for use +// with apply. +type TSDBSpecApplyConfiguration struct { + OutOfOrderTimeWindow *monitoringv1.Duration `json:"outOfOrderTimeWindow,omitempty"` +} + +// TSDBSpecApplyConfiguration constructs a declarative configuration of the TSDBSpec type for use with +// apply. +func TSDBSpec() *TSDBSpecApplyConfiguration { + return &TSDBSpecApplyConfiguration{} +} + +// WithOutOfOrderTimeWindow sets the OutOfOrderTimeWindow field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OutOfOrderTimeWindow field is set to the value of the last call. +func (b *TSDBSpecApplyConfiguration) WithOutOfOrderTimeWindow(value monitoringv1.Duration) *TSDBSpecApplyConfiguration { + b.OutOfOrderTimeWindow = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/webconfigfilefields.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/webconfigfilefields.go new file mode 100644 index 0000000000..0c37e74cc0 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/webconfigfilefields.go @@ -0,0 +1,46 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// WebConfigFileFieldsApplyConfiguration represents a declarative configuration of the WebConfigFileFields type for use +// with apply. +type WebConfigFileFieldsApplyConfiguration struct { + TLSConfig *WebTLSConfigApplyConfiguration `json:"tlsConfig,omitempty"` + HTTPConfig *WebHTTPConfigApplyConfiguration `json:"httpConfig,omitempty"` +} + +// WebConfigFileFieldsApplyConfiguration constructs a declarative configuration of the WebConfigFileFields type for use with +// apply. +func WebConfigFileFields() *WebConfigFileFieldsApplyConfiguration { + return &WebConfigFileFieldsApplyConfiguration{} +} + +// WithTLSConfig sets the TLSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSConfig field is set to the value of the last call. +func (b *WebConfigFileFieldsApplyConfiguration) WithTLSConfig(value *WebTLSConfigApplyConfiguration) *WebConfigFileFieldsApplyConfiguration { + b.TLSConfig = value + return b +} + +// WithHTTPConfig sets the HTTPConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPConfig field is set to the value of the last call. +func (b *WebConfigFileFieldsApplyConfiguration) WithHTTPConfig(value *WebHTTPConfigApplyConfiguration) *WebConfigFileFieldsApplyConfiguration { + b.HTTPConfig = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/webhttpconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/webhttpconfig.go new file mode 100644 index 0000000000..1f7f130d46 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/webhttpconfig.go @@ -0,0 +1,46 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// WebHTTPConfigApplyConfiguration represents a declarative configuration of the WebHTTPConfig type for use +// with apply. +type WebHTTPConfigApplyConfiguration struct { + HTTP2 *bool `json:"http2,omitempty"` + Headers *WebHTTPHeadersApplyConfiguration `json:"headers,omitempty"` +} + +// WebHTTPConfigApplyConfiguration constructs a declarative configuration of the WebHTTPConfig type for use with +// apply. +func WebHTTPConfig() *WebHTTPConfigApplyConfiguration { + return &WebHTTPConfigApplyConfiguration{} +} + +// WithHTTP2 sets the HTTP2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTP2 field is set to the value of the last call. 
+func (b *WebHTTPConfigApplyConfiguration) WithHTTP2(value bool) *WebHTTPConfigApplyConfiguration { + b.HTTP2 = &value + return b +} + +// WithHeaders sets the Headers field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Headers field is set to the value of the last call. +func (b *WebHTTPConfigApplyConfiguration) WithHeaders(value *WebHTTPHeadersApplyConfiguration) *WebHTTPConfigApplyConfiguration { + b.Headers = value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/webhttpheaders.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/webhttpheaders.go new file mode 100644 index 0000000000..483b88d494 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/webhttpheaders.go @@ -0,0 +1,73 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// WebHTTPHeadersApplyConfiguration represents a declarative configuration of the WebHTTPHeaders type for use +// with apply. +type WebHTTPHeadersApplyConfiguration struct { + ContentSecurityPolicy *string `json:"contentSecurityPolicy,omitempty"` + XFrameOptions *string `json:"xFrameOptions,omitempty"` + XContentTypeOptions *string `json:"xContentTypeOptions,omitempty"` + XXSSProtection *string `json:"xXSSProtection,omitempty"` + StrictTransportSecurity *string `json:"strictTransportSecurity,omitempty"` +} + +// WebHTTPHeadersApplyConfiguration constructs a declarative configuration of the WebHTTPHeaders type for use with +// apply. +func WebHTTPHeaders() *WebHTTPHeadersApplyConfiguration { + return &WebHTTPHeadersApplyConfiguration{} +} + +// WithContentSecurityPolicy sets the ContentSecurityPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ContentSecurityPolicy field is set to the value of the last call. +func (b *WebHTTPHeadersApplyConfiguration) WithContentSecurityPolicy(value string) *WebHTTPHeadersApplyConfiguration { + b.ContentSecurityPolicy = &value + return b +} + +// WithXFrameOptions sets the XFrameOptions field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XFrameOptions field is set to the value of the last call. 
+func (b *WebHTTPHeadersApplyConfiguration) WithXFrameOptions(value string) *WebHTTPHeadersApplyConfiguration { + b.XFrameOptions = &value + return b +} + +// WithXContentTypeOptions sets the XContentTypeOptions field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XContentTypeOptions field is set to the value of the last call. +func (b *WebHTTPHeadersApplyConfiguration) WithXContentTypeOptions(value string) *WebHTTPHeadersApplyConfiguration { + b.XContentTypeOptions = &value + return b +} + +// WithXXSSProtection sets the XXSSProtection field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the XXSSProtection field is set to the value of the last call. +func (b *WebHTTPHeadersApplyConfiguration) WithXXSSProtection(value string) *WebHTTPHeadersApplyConfiguration { + b.XXSSProtection = &value + return b +} + +// WithStrictTransportSecurity sets the StrictTransportSecurity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StrictTransportSecurity field is set to the value of the last call. +func (b *WebHTTPHeadersApplyConfiguration) WithStrictTransportSecurity(value string) *WebHTTPHeadersApplyConfiguration { + b.StrictTransportSecurity = &value + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/webtlsconfig.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/webtlsconfig.go new file mode 100644 index 0000000000..5e9123cb97 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/webtlsconfig.go @@ -0,0 +1,144 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// WebTLSConfigApplyConfiguration represents a declarative configuration of the WebTLSConfig type for use +// with apply. 
+type WebTLSConfigApplyConfiguration struct { + Cert *SecretOrConfigMapApplyConfiguration `json:"cert,omitempty"` + CertFile *string `json:"certFile,omitempty"` + KeySecret *corev1.SecretKeySelector `json:"keySecret,omitempty"` + KeyFile *string `json:"keyFile,omitempty"` + ClientCA *SecretOrConfigMapApplyConfiguration `json:"client_ca,omitempty"` + ClientCAFile *string `json:"clientCAFile,omitempty"` + ClientAuthType *string `json:"clientAuthType,omitempty"` + MinVersion *string `json:"minVersion,omitempty"` + MaxVersion *string `json:"maxVersion,omitempty"` + CipherSuites []string `json:"cipherSuites,omitempty"` + PreferServerCipherSuites *bool `json:"preferServerCipherSuites,omitempty"` + CurvePreferences []string `json:"curvePreferences,omitempty"` +} + +// WebTLSConfigApplyConfiguration constructs a declarative configuration of the WebTLSConfig type for use with +// apply. +func WebTLSConfig() *WebTLSConfigApplyConfiguration { + return &WebTLSConfigApplyConfiguration{} +} + +// WithCert sets the Cert field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Cert field is set to the value of the last call. +func (b *WebTLSConfigApplyConfiguration) WithCert(value *SecretOrConfigMapApplyConfiguration) *WebTLSConfigApplyConfiguration { + b.Cert = value + return b +} + +// WithCertFile sets the CertFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CertFile field is set to the value of the last call. +func (b *WebTLSConfigApplyConfiguration) WithCertFile(value string) *WebTLSConfigApplyConfiguration { + b.CertFile = &value + return b +} + +// WithKeySecret sets the KeySecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeySecret field is set to the value of the last call. +func (b *WebTLSConfigApplyConfiguration) WithKeySecret(value corev1.SecretKeySelector) *WebTLSConfigApplyConfiguration { + b.KeySecret = &value + return b +} + +// WithKeyFile sets the KeyFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeyFile field is set to the value of the last call. +func (b *WebTLSConfigApplyConfiguration) WithKeyFile(value string) *WebTLSConfigApplyConfiguration { + b.KeyFile = &value + return b +} + +// WithClientCA sets the ClientCA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientCA field is set to the value of the last call. +func (b *WebTLSConfigApplyConfiguration) WithClientCA(value *SecretOrConfigMapApplyConfiguration) *WebTLSConfigApplyConfiguration { + b.ClientCA = value + return b +} + +// WithClientCAFile sets the ClientCAFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientCAFile field is set to the value of the last call. 
+func (b *WebTLSConfigApplyConfiguration) WithClientCAFile(value string) *WebTLSConfigApplyConfiguration { + b.ClientCAFile = &value + return b +} + +// WithClientAuthType sets the ClientAuthType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientAuthType field is set to the value of the last call. +func (b *WebTLSConfigApplyConfiguration) WithClientAuthType(value string) *WebTLSConfigApplyConfiguration { + b.ClientAuthType = &value + return b +} + +// WithMinVersion sets the MinVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinVersion field is set to the value of the last call. +func (b *WebTLSConfigApplyConfiguration) WithMinVersion(value string) *WebTLSConfigApplyConfiguration { + b.MinVersion = &value + return b +} + +// WithMaxVersion sets the MaxVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxVersion field is set to the value of the last call. +func (b *WebTLSConfigApplyConfiguration) WithMaxVersion(value string) *WebTLSConfigApplyConfiguration { + b.MaxVersion = &value + return b +} + +// WithCipherSuites adds the given value to the CipherSuites field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CipherSuites field. +func (b *WebTLSConfigApplyConfiguration) WithCipherSuites(values ...string) *WebTLSConfigApplyConfiguration { + for i := range values { + b.CipherSuites = append(b.CipherSuites, values[i]) + } + return b +} + +// WithPreferServerCipherSuites sets the PreferServerCipherSuites field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PreferServerCipherSuites field is set to the value of the last call. +func (b *WebTLSConfigApplyConfiguration) WithPreferServerCipherSuites(value bool) *WebTLSConfigApplyConfiguration { + b.PreferServerCipherSuites = &value + return b +} + +// WithCurvePreferences adds the given value to the CurvePreferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CurvePreferences field. 
+func (b *WebTLSConfigApplyConfiguration) WithCurvePreferences(values ...string) *WebTLSConfigApplyConfiguration { + for i := range values { + b.CurvePreferences = append(b.CurvePreferences, values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/workloadbinding.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/workloadbinding.go new file mode 100644 index 0000000000..9aab9dd87e --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/workloadbinding.go @@ -0,0 +1,78 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// WorkloadBindingApplyConfiguration represents a declarative configuration of the WorkloadBinding type for use +// with apply. +type WorkloadBindingApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Conditions []ConfigResourceConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// WorkloadBindingApplyConfiguration constructs a declarative configuration of the WorkloadBinding type for use with +// apply. +func WorkloadBinding() *WorkloadBindingApplyConfiguration { + return &WorkloadBindingApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *WorkloadBindingApplyConfiguration) WithGroup(value string) *WorkloadBindingApplyConfiguration { + b.Group = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *WorkloadBindingApplyConfiguration) WithResource(value string) *WorkloadBindingApplyConfiguration { + b.Resource = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *WorkloadBindingApplyConfiguration) WithName(value string) *WorkloadBindingApplyConfiguration { + b.Name = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Namespace field is set to the value of the last call. +func (b *WorkloadBindingApplyConfiguration) WithNamespace(value string) *WorkloadBindingApplyConfiguration { + b.Namespace = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *WorkloadBindingApplyConfiguration) WithConditions(values ...*ConfigResourceConditionApplyConfiguration) *WorkloadBindingApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme/doc.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme/doc.go new file mode 100644 index 0000000000..c43a39bdce --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme/doc.go @@ -0,0 +1,18 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme/register.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme/register.go new file mode 100644 index 0000000000..6952498c28 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme/register.go @@ -0,0 +1,58 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
+ +package scheme + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1" + monitoringv1beta1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + monitoringv1.AddToScheme, + monitoringv1beta1.AddToScheme, + monitoringv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/alertmanager.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/alertmanager.go new file mode 100644 index 0000000000..b4efe3926a --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/alertmanager.go @@ -0,0 +1,105 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + applyconfigurationmonitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + autoscalingv1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// AlertmanagersGetter has a method to return a AlertmanagerInterface. +// A group's client should implement this interface. 
+type AlertmanagersGetter interface { + Alertmanagers(namespace string) AlertmanagerInterface +} + +// AlertmanagerInterface has methods to work with Alertmanager resources. +type AlertmanagerInterface interface { + Create(ctx context.Context, alertmanager *monitoringv1.Alertmanager, opts metav1.CreateOptions) (*monitoringv1.Alertmanager, error) + Update(ctx context.Context, alertmanager *monitoringv1.Alertmanager, opts metav1.UpdateOptions) (*monitoringv1.Alertmanager, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, alertmanager *monitoringv1.Alertmanager, opts metav1.UpdateOptions) (*monitoringv1.Alertmanager, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*monitoringv1.Alertmanager, error) + List(ctx context.Context, opts metav1.ListOptions) (*monitoringv1.AlertmanagerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *monitoringv1.Alertmanager, err error) + Apply(ctx context.Context, alertmanager *applyconfigurationmonitoringv1.AlertmanagerApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.Alertmanager, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, alertmanager *applyconfigurationmonitoringv1.AlertmanagerApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.Alertmanager, err error) + GetScale(ctx context.Context, alertmanagerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(ctx context.Context, alertmanagerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) + + AlertmanagerExpansion +} + +// alertmanagers implements AlertmanagerInterface +type alertmanagers struct { + *gentype.ClientWithListAndApply[*monitoringv1.Alertmanager, *monitoringv1.AlertmanagerList, *applyconfigurationmonitoringv1.AlertmanagerApplyConfiguration] +} + +// newAlertmanagers returns a Alertmanagers +func newAlertmanagers(c *MonitoringV1Client, namespace string) *alertmanagers { + return &alertmanagers{ + gentype.NewClientWithListAndApply[*monitoringv1.Alertmanager, *monitoringv1.AlertmanagerList, *applyconfigurationmonitoringv1.AlertmanagerApplyConfiguration]( + "alertmanagers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *monitoringv1.Alertmanager { return &monitoringv1.Alertmanager{} }, + func() *monitoringv1.AlertmanagerList { return &monitoringv1.AlertmanagerList{} }, + ), + } +} + +// GetScale takes name of the alertmanager, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *alertmanagers) GetScale(ctx context.Context, alertmanagerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). + Resource("alertmanagers"). + Name(alertmanagerName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. 
Returns the server's representation of the scale, and an error, if there is any. +func (c *alertmanagers) UpdateScale(ctx context.Context, alertmanagerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). + Resource("alertmanagers"). + Name(alertmanagerName). + SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(scale). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/doc.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/doc.go new file mode 100644 index 0000000000..38db2725c7 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/doc.go @@ -0,0 +1,18 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/generated_expansion.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/generated_expansion.go new file mode 100644 index 0000000000..88d0d4f76b --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/generated_expansion.go @@ -0,0 +1,31 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +type AlertmanagerExpansion interface{} + +type PodMonitorExpansion interface{} + +type ProbeExpansion interface{} + +type PrometheusExpansion interface{} + +type PrometheusRuleExpansion interface{} + +type ServiceMonitorExpansion interface{} + +type ThanosRulerExpansion interface{} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/monitoring_client.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/monitoring_client.go new file mode 100644 index 0000000000..7666d1522f --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/monitoring_client.go @@ -0,0 +1,129 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + http "net/http" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type MonitoringV1Interface interface { + RESTClient() rest.Interface + AlertmanagersGetter + PodMonitorsGetter + ProbesGetter + PrometheusesGetter + PrometheusRulesGetter + ServiceMonitorsGetter + ThanosRulersGetter +} + +// MonitoringV1Client is used to interact with features provided by the monitoring.coreos.com group. +type MonitoringV1Client struct { + restClient rest.Interface +} + +func (c *MonitoringV1Client) Alertmanagers(namespace string) AlertmanagerInterface { + return newAlertmanagers(c, namespace) +} + +func (c *MonitoringV1Client) PodMonitors(namespace string) PodMonitorInterface { + return newPodMonitors(c, namespace) +} + +func (c *MonitoringV1Client) Probes(namespace string) ProbeInterface { + return newProbes(c, namespace) +} + +func (c *MonitoringV1Client) Prometheuses(namespace string) PrometheusInterface { + return newPrometheuses(c, namespace) +} + +func (c *MonitoringV1Client) PrometheusRules(namespace string) PrometheusRuleInterface { + return newPrometheusRules(c, namespace) +} + +func (c *MonitoringV1Client) ServiceMonitors(namespace string) ServiceMonitorInterface { + return newServiceMonitors(c, namespace) +} + +func (c *MonitoringV1Client) ThanosRulers(namespace string) ThanosRulerInterface { + return newThanosRulers(c, namespace) +} + +// NewForConfig creates a new MonitoringV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*MonitoringV1Client, error) { + config := *c + setConfigDefaults(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new MonitoringV1Client for the given config and http client. 
+// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*MonitoringV1Client, error) { + config := *c + setConfigDefaults(&config) + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &MonitoringV1Client{client}, nil +} + +// NewForConfigOrDie creates a new MonitoringV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *MonitoringV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new MonitoringV1Client for the given RESTClient. +func New(c rest.Interface) *MonitoringV1Client { + return &MonitoringV1Client{c} +} + +func setConfigDefaults(config *rest.Config) { + gv := monitoringv1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *MonitoringV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/podmonitor.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/podmonitor.go new file mode 100644 index 0000000000..83e71948cf --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/podmonitor.go @@ -0,0 +1,72 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + applyconfigurationmonitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// PodMonitorsGetter has a method to return a PodMonitorInterface. +// A group's client should implement this interface. +type PodMonitorsGetter interface { + PodMonitors(namespace string) PodMonitorInterface +} + +// PodMonitorInterface has methods to work with PodMonitor resources. 
+type PodMonitorInterface interface { + Create(ctx context.Context, podMonitor *monitoringv1.PodMonitor, opts metav1.CreateOptions) (*monitoringv1.PodMonitor, error) + Update(ctx context.Context, podMonitor *monitoringv1.PodMonitor, opts metav1.UpdateOptions) (*monitoringv1.PodMonitor, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, podMonitor *monitoringv1.PodMonitor, opts metav1.UpdateOptions) (*monitoringv1.PodMonitor, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*monitoringv1.PodMonitor, error) + List(ctx context.Context, opts metav1.ListOptions) (*monitoringv1.PodMonitorList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *monitoringv1.PodMonitor, err error) + Apply(ctx context.Context, podMonitor *applyconfigurationmonitoringv1.PodMonitorApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.PodMonitor, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, podMonitor *applyconfigurationmonitoringv1.PodMonitorApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.PodMonitor, err error) + PodMonitorExpansion +} + +// podMonitors implements PodMonitorInterface +type podMonitors struct { + *gentype.ClientWithListAndApply[*monitoringv1.PodMonitor, *monitoringv1.PodMonitorList, *applyconfigurationmonitoringv1.PodMonitorApplyConfiguration] +} + +// newPodMonitors returns a PodMonitors +func newPodMonitors(c *MonitoringV1Client, namespace string) *podMonitors { + return &podMonitors{ + gentype.NewClientWithListAndApply[*monitoringv1.PodMonitor, *monitoringv1.PodMonitorList, *applyconfigurationmonitoringv1.PodMonitorApplyConfiguration]( + "podmonitors", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *monitoringv1.PodMonitor { return &monitoringv1.PodMonitor{} }, + func() *monitoringv1.PodMonitorList { return &monitoringv1.PodMonitorList{} }, + ), + } +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/probe.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/probe.go new file mode 100644 index 0000000000..00b3140df5 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/probe.go @@ -0,0 +1,72 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + context "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + applyconfigurationmonitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ProbesGetter has a method to return a ProbeInterface. +// A group's client should implement this interface. +type ProbesGetter interface { + Probes(namespace string) ProbeInterface +} + +// ProbeInterface has methods to work with Probe resources. +type ProbeInterface interface { + Create(ctx context.Context, probe *monitoringv1.Probe, opts metav1.CreateOptions) (*monitoringv1.Probe, error) + Update(ctx context.Context, probe *monitoringv1.Probe, opts metav1.UpdateOptions) (*monitoringv1.Probe, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, probe *monitoringv1.Probe, opts metav1.UpdateOptions) (*monitoringv1.Probe, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*monitoringv1.Probe, error) + List(ctx context.Context, opts metav1.ListOptions) (*monitoringv1.ProbeList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *monitoringv1.Probe, err error) + Apply(ctx context.Context, probe *applyconfigurationmonitoringv1.ProbeApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.Probe, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, probe *applyconfigurationmonitoringv1.ProbeApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.Probe, err error) + ProbeExpansion +} + +// probes implements ProbeInterface +type probes struct { + *gentype.ClientWithListAndApply[*monitoringv1.Probe, *monitoringv1.ProbeList, *applyconfigurationmonitoringv1.ProbeApplyConfiguration] +} + +// newProbes returns a Probes +func newProbes(c *MonitoringV1Client, namespace string) *probes { + return &probes{ + gentype.NewClientWithListAndApply[*monitoringv1.Probe, *monitoringv1.ProbeList, *applyconfigurationmonitoringv1.ProbeApplyConfiguration]( + "probes", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *monitoringv1.Probe { return &monitoringv1.Probe{} }, + func() *monitoringv1.ProbeList { return &monitoringv1.ProbeList{} }, + ), + } +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheus.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheus.go new file mode 100644 index 0000000000..4106f4da88 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheus.go @@ -0,0 +1,105 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + applyconfigurationmonitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + autoscalingv1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// PrometheusesGetter has a method to return a PrometheusInterface. +// A group's client should implement this interface. +type PrometheusesGetter interface { + Prometheuses(namespace string) PrometheusInterface +} + +// PrometheusInterface has methods to work with Prometheus resources. +type PrometheusInterface interface { + Create(ctx context.Context, prometheus *monitoringv1.Prometheus, opts metav1.CreateOptions) (*monitoringv1.Prometheus, error) + Update(ctx context.Context, prometheus *monitoringv1.Prometheus, opts metav1.UpdateOptions) (*monitoringv1.Prometheus, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, prometheus *monitoringv1.Prometheus, opts metav1.UpdateOptions) (*monitoringv1.Prometheus, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*monitoringv1.Prometheus, error) + List(ctx context.Context, opts metav1.ListOptions) (*monitoringv1.PrometheusList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *monitoringv1.Prometheus, err error) + Apply(ctx context.Context, prometheus *applyconfigurationmonitoringv1.PrometheusApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.Prometheus, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, prometheus *applyconfigurationmonitoringv1.PrometheusApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.Prometheus, err error) + GetScale(ctx context.Context, prometheusName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(ctx context.Context, prometheusName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) + + PrometheusExpansion +} + +// prometheuses implements PrometheusInterface +type prometheuses struct { + *gentype.ClientWithListAndApply[*monitoringv1.Prometheus, *monitoringv1.PrometheusList, *applyconfigurationmonitoringv1.PrometheusApplyConfiguration] +} + +// newPrometheuses returns a Prometheuses +func newPrometheuses(c *MonitoringV1Client, namespace string) *prometheuses { + return &prometheuses{ + gentype.NewClientWithListAndApply[*monitoringv1.Prometheus, *monitoringv1.PrometheusList, *applyconfigurationmonitoringv1.PrometheusApplyConfiguration]( + "prometheuses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *monitoringv1.Prometheus { return &monitoringv1.Prometheus{} }, + func() *monitoringv1.PrometheusList { return &monitoringv1.PrometheusList{} }, + ), + } +} + +// GetScale takes name of the prometheus, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *prometheuses) GetScale(ctx context.Context, prometheusName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). + Resource("prometheuses"). + Name(prometheusName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *prometheuses) UpdateScale(ctx context.Context, prometheusName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). + Resource("prometheuses"). + Name(prometheusName). + SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(scale). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheusrule.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheusrule.go new file mode 100644 index 0000000000..7e9560803b --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheusrule.go @@ -0,0 +1,72 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + applyconfigurationmonitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// PrometheusRulesGetter has a method to return a PrometheusRuleInterface. +// A group's client should implement this interface. +type PrometheusRulesGetter interface { + PrometheusRules(namespace string) PrometheusRuleInterface +} + +// PrometheusRuleInterface has methods to work with PrometheusRule resources. +type PrometheusRuleInterface interface { + Create(ctx context.Context, prometheusRule *monitoringv1.PrometheusRule, opts metav1.CreateOptions) (*monitoringv1.PrometheusRule, error) + Update(ctx context.Context, prometheusRule *monitoringv1.PrometheusRule, opts metav1.UpdateOptions) (*monitoringv1.PrometheusRule, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, prometheusRule *monitoringv1.PrometheusRule, opts metav1.UpdateOptions) (*monitoringv1.PrometheusRule, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*monitoringv1.PrometheusRule, error) + List(ctx context.Context, opts metav1.ListOptions) (*monitoringv1.PrometheusRuleList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *monitoringv1.PrometheusRule, err error) + Apply(ctx context.Context, prometheusRule *applyconfigurationmonitoringv1.PrometheusRuleApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.PrometheusRule, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, prometheusRule *applyconfigurationmonitoringv1.PrometheusRuleApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.PrometheusRule, err error) + PrometheusRuleExpansion +} + +// prometheusRules implements PrometheusRuleInterface +type prometheusRules struct { + *gentype.ClientWithListAndApply[*monitoringv1.PrometheusRule, *monitoringv1.PrometheusRuleList, *applyconfigurationmonitoringv1.PrometheusRuleApplyConfiguration] +} + +// newPrometheusRules returns a PrometheusRules +func newPrometheusRules(c *MonitoringV1Client, namespace string) *prometheusRules { + return &prometheusRules{ + gentype.NewClientWithListAndApply[*monitoringv1.PrometheusRule, *monitoringv1.PrometheusRuleList, *applyconfigurationmonitoringv1.PrometheusRuleApplyConfiguration]( + "prometheusrules", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *monitoringv1.PrometheusRule { return &monitoringv1.PrometheusRule{} }, + func() *monitoringv1.PrometheusRuleList { return &monitoringv1.PrometheusRuleList{} }, + ), + } +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/servicemonitor.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/servicemonitor.go new file mode 100644 index 0000000000..66c66284b2 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/servicemonitor.go @@ -0,0 +1,72 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + applyconfigurationmonitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ServiceMonitorsGetter has a method to return a ServiceMonitorInterface. +// A group's client should implement this interface. +type ServiceMonitorsGetter interface { + ServiceMonitors(namespace string) ServiceMonitorInterface +} + +// ServiceMonitorInterface has methods to work with ServiceMonitor resources. +type ServiceMonitorInterface interface { + Create(ctx context.Context, serviceMonitor *monitoringv1.ServiceMonitor, opts metav1.CreateOptions) (*monitoringv1.ServiceMonitor, error) + Update(ctx context.Context, serviceMonitor *monitoringv1.ServiceMonitor, opts metav1.UpdateOptions) (*monitoringv1.ServiceMonitor, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, serviceMonitor *monitoringv1.ServiceMonitor, opts metav1.UpdateOptions) (*monitoringv1.ServiceMonitor, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*monitoringv1.ServiceMonitor, error) + List(ctx context.Context, opts metav1.ListOptions) (*monitoringv1.ServiceMonitorList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *monitoringv1.ServiceMonitor, err error) + Apply(ctx context.Context, serviceMonitor *applyconfigurationmonitoringv1.ServiceMonitorApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.ServiceMonitor, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, serviceMonitor *applyconfigurationmonitoringv1.ServiceMonitorApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.ServiceMonitor, err error) + ServiceMonitorExpansion +} + +// serviceMonitors implements ServiceMonitorInterface +type serviceMonitors struct { + *gentype.ClientWithListAndApply[*monitoringv1.ServiceMonitor, *monitoringv1.ServiceMonitorList, *applyconfigurationmonitoringv1.ServiceMonitorApplyConfiguration] +} + +// newServiceMonitors returns a ServiceMonitors +func newServiceMonitors(c *MonitoringV1Client, namespace string) *serviceMonitors { + return &serviceMonitors{ + gentype.NewClientWithListAndApply[*monitoringv1.ServiceMonitor, *monitoringv1.ServiceMonitorList, *applyconfigurationmonitoringv1.ServiceMonitorApplyConfiguration]( + "servicemonitors", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *monitoringv1.ServiceMonitor { return &monitoringv1.ServiceMonitor{} }, + func() *monitoringv1.ServiceMonitorList { return &monitoringv1.ServiceMonitorList{} }, + ), + } +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/thanosruler.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/thanosruler.go new file mode 100644 index 0000000000..c7a32213b6 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/thanosruler.go @@ -0,0 +1,72 @@ +// Copyright The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + context "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + applyconfigurationmonitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ThanosRulersGetter has a method to return a ThanosRulerInterface. +// A group's client should implement this interface. +type ThanosRulersGetter interface { + ThanosRulers(namespace string) ThanosRulerInterface +} + +// ThanosRulerInterface has methods to work with ThanosRuler resources. +type ThanosRulerInterface interface { + Create(ctx context.Context, thanosRuler *monitoringv1.ThanosRuler, opts metav1.CreateOptions) (*monitoringv1.ThanosRuler, error) + Update(ctx context.Context, thanosRuler *monitoringv1.ThanosRuler, opts metav1.UpdateOptions) (*monitoringv1.ThanosRuler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, thanosRuler *monitoringv1.ThanosRuler, opts metav1.UpdateOptions) (*monitoringv1.ThanosRuler, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*monitoringv1.ThanosRuler, error) + List(ctx context.Context, opts metav1.ListOptions) (*monitoringv1.ThanosRulerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *monitoringv1.ThanosRuler, err error) + Apply(ctx context.Context, thanosRuler *applyconfigurationmonitoringv1.ThanosRulerApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.ThanosRuler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, thanosRuler *applyconfigurationmonitoringv1.ThanosRulerApplyConfiguration, opts metav1.ApplyOptions) (result *monitoringv1.ThanosRuler, err error) + ThanosRulerExpansion +} + +// thanosRulers implements ThanosRulerInterface +type thanosRulers struct { + *gentype.ClientWithListAndApply[*monitoringv1.ThanosRuler, *monitoringv1.ThanosRulerList, *applyconfigurationmonitoringv1.ThanosRulerApplyConfiguration] +} + +// newThanosRulers returns a ThanosRulers +func newThanosRulers(c *MonitoringV1Client, namespace string) *thanosRulers { + return &thanosRulers{ + gentype.NewClientWithListAndApply[*monitoringv1.ThanosRuler, *monitoringv1.ThanosRulerList, *applyconfigurationmonitoringv1.ThanosRulerApplyConfiguration]( + "thanosrulers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *monitoringv1.ThanosRuler { return &monitoringv1.ThanosRuler{} }, + func() *monitoringv1.ThanosRulerList { return &monitoringv1.ThanosRulerList{} }, + ), + } +} diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go index 51121a3d52..e86346e8b6 100644 --- a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go +++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -55,7 +55,7 @@ type Config struct { // Token uses client credentials to retrieve a token. // -// The provided context optionally controls which HTTP client is used. See the oauth2.HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [oauth2.HTTPClient] variable. func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) { return c.TokenSource(ctx).Token() } @@ -64,18 +64,18 @@ func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) { // The token will auto-refresh as necessary. // // The provided context optionally controls which HTTP client -// is returned. See the oauth2.HTTPClient variable. +// is returned. See the [oauth2.HTTPClient] variable. // -// The returned Client and its Transport should not be modified. +// The returned [http.Client] and its Transport should not be modified. func (c *Config) Client(ctx context.Context) *http.Client { return oauth2.NewClient(ctx, c.TokenSource(ctx)) } -// TokenSource returns a TokenSource that returns t until t expires, +// TokenSource returns a [oauth2.TokenSource] that returns t until t expires, // automatically refreshing it as necessary using the provided context and the // client ID and client secret. // -// Most users will use Config.Client instead. +// Most users will use [Config.Client] instead. func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { source := &tokenSource{ ctx: ctx, diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go index 03265e888a..8c7c475f2d 100644 --- a/vendor/golang.org/x/oauth2/internal/doc.go +++ b/vendor/golang.org/x/oauth2/internal/doc.go @@ -2,5 +2,5 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. +// Package internal contains support packages for [golang.org/x/oauth2]. 
package internal diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index 14989beaf4..71ea6ad1f5 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -13,7 +13,7 @@ import ( ) // ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a +// to an [*rsa.PrivateKey]. It detects whether the private key is in a // PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index e83ddeef0f..8389f24629 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "mime" "net/http" @@ -26,9 +25,9 @@ import ( // the requests to access protected resources on the OAuth 2.0 // provider's backend. // -// This type is a mirror of oauth2.Token and exists to break +// This type is a mirror of [golang.org/x/oauth2.Token] and exists to break // an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. +// should convert this Token into an [golang.org/x/oauth2.Token] before use. type Token struct { // AccessToken is the token that authorizes and authenticates // the requests. @@ -50,9 +49,16 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // Raw optionally contains extra metadata from the server // when updating a token. - Raw interface{} + Raw any } // tokenJSON is the struct representing the HTTP response from OAuth2 @@ -99,14 +105,6 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { return nil } -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. -func RegisterBrokenAuthHeaderProvider(tokenURL string) {} - // AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. type AuthStyle int @@ -143,6 +141,11 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { return c } +type authStyleCacheKey struct { + url string + clientID string +} + // AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that @@ -150,26 +153,26 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { // small. type AuthStyleCache struct { mu sync.Mutex - m map[string]AuthStyle // keyed by tokenURL + m map[authStyleCacheKey]AuthStyle } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. 
-func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { +func (c *AuthStyleCache) lookupAuthStyle(tokenURL, clientID string) (style AuthStyle, ok bool) { c.mu.Lock() defer c.mu.Unlock() - style, ok = c.m[tokenURL] + style, ok = c.m[authStyleCacheKey{tokenURL, clientID}] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { +func (c *AuthStyleCache) setAuthStyle(tokenURL, clientID string, v AuthStyle) { c.mu.Lock() defer c.mu.Unlock() if c.m == nil { - c.m = make(map[string]AuthStyle) + c.m = make(map[authStyleCacheKey]AuthStyle) } - c.m[tokenURL] = v + c.m[authStyleCacheKey{tokenURL, clientID}] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -210,9 +213,9 @@ func cloneURLValues(v url.Values) url.Values { } func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { - needsAuthStyleProbe := authStyle == 0 + needsAuthStyleProbe := authStyle == AuthStyleUnknown if needsAuthStyleProbe { - if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL, clientID); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -242,7 +245,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - styleCache.setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, clientID, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. @@ -257,7 +260,7 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, err } - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) r.Body.Close() if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) @@ -312,7 +315,8 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { TokenType: tj.TokenType, RefreshToken: tj.RefreshToken, Expiry: tj.expiry(), - Raw: make(map[string]interface{}), + ExpiresIn: int64(tj.ExpiresIn), + Raw: make(map[string]any), } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index b9db01ddfd..afc0aeb274 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -9,8 +9,8 @@ import ( "net/http" ) -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate an [*http.Client] value with a context. var HTTPClient ContextKey // ContextKey is just an empty struct. It exists so HTTPClient can be diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 74f052aa9f..de34feb844 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -22,9 +22,9 @@ import ( ) // NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). +// your own [context.Context]. 
// -// Deprecated: Use context.Background() or context.TODO() instead. +// Deprecated: Use [context.Background] or [context.TODO] instead. var NoContext = context.TODO() // RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. @@ -37,8 +37,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) {} // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. -// For the client credentials 2-legged OAuth2 flow, see the clientcredentials -// package (https://golang.org/x/oauth2/clientcredentials). +// For the client credentials 2-legged OAuth2 flow, see the +// [golang.org/x/oauth2/clientcredentials] package. type Config struct { // ClientID is the application's ID. ClientID string @@ -46,7 +46,7 @@ type Config struct { // ClientSecret is the application's secret. ClientSecret string - // Endpoint contains the resource server's token endpoint + // Endpoint contains the authorization server's token endpoint // URLs. These are constants specific to each server and are // often available via site-specific packages, such as // google.Endpoint or github.Endpoint. @@ -135,7 +135,7 @@ type setParam struct{ k, v string } func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// SetAuthURLParam builds an [AuthCodeOption] which passes key/value parameters // to a provider's authorization endpoint. func SetAuthURLParam(key, value string) AuthCodeOption { return setParam{key, value} @@ -148,8 +148,8 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // request and callback. The authorization server includes this value when // redirecting the user agent back to the client. // -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. +// Opts may include [AccessTypeOnline] or [AccessTypeOffline], as well +// as [ApprovalForce]. // // To protect against CSRF attacks, opts should include a PKCE challenge // (S256ChallengeOption). Not all servers support PKCE. An alternative is to @@ -194,7 +194,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { // and when other authorization grant types are not available." // See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { v := url.Values{ "grant_type": {"password"}, @@ -212,10 +212,10 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // It is used after a resource provider redirects the user back // to the Redirect URI (the URL obtained from AuthCodeURL). // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. // -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state") if you are +// The code will be in the [http.Request.FormValue]("code"). Before +// calling Exchange, be sure to validate [http.Request.FormValue]("state") if you are // using it to protect against CSRF attacks. 
// // If using PKCE to protect against CSRF attacks, opts should include a @@ -242,10 +242,10 @@ func (c *Config) Client(ctx context.Context, t *Token) *http.Client { return NewClient(ctx, c.TokenSource(ctx, t)) } -// TokenSource returns a TokenSource that returns t until t expires, +// TokenSource returns a [TokenSource] that returns t until t expires, // automatically refreshing it as necessary using the provided context. // -// Most users will use Config.Client instead. +// Most users will use [Config.Client] instead. func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { tkr := &tokenRefresher{ ctx: ctx, @@ -260,7 +260,7 @@ func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { } } -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// tokenRefresher is a TokenSource that makes "grant_type=refresh_token" // HTTP requests to renew a token using a RefreshToken. type tokenRefresher struct { ctx context.Context // used to get HTTP requests @@ -288,7 +288,7 @@ func (tf *tokenRefresher) Token() (*Token, error) { if tf.refreshToken != tk.RefreshToken { tf.refreshToken = tk.RefreshToken } - return tk, err + return tk, nil } // reuseTokenSource is a TokenSource that holds a single token in memory @@ -305,8 +305,7 @@ type reuseTokenSource struct { } // Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. +// refresh the current token and return the new one. func (s *reuseTokenSource) Token() (*Token, error) { s.mu.Lock() defer s.mu.Unlock() @@ -322,7 +321,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { return t, nil } -// StaticTokenSource returns a TokenSource that always returns the same token. +// StaticTokenSource returns a [TokenSource] that always returns the same token. // Because the provided token t is never refreshed, StaticTokenSource is only // useful for tokens that never expire. func StaticTokenSource(t *Token) TokenSource { @@ -338,16 +337,16 @@ func (s staticTokenSource) Token() (*Token, error) { return s.t, nil } -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate a [*http.Client] value with a context. var HTTPClient internal.ContextKey -// NewClient creates an *http.Client from a Context and TokenSource. +// NewClient creates an [*http.Client] from a [context.Context] and [TokenSource]. // The returned client is not valid beyond the lifetime of the context. // -// Note that if a custom *http.Client is provided via the Context it +// Note that if a custom [*http.Client] is provided via the [context.Context] it // is used only for token acquisition and is not used to configure the -// *http.Client returned from NewClient. +// [*http.Client] returned from NewClient. // // As a special case, if src is nil, a non-OAuth2 client is returned // using the provided context. 
This exists to support related OAuth2 @@ -356,15 +355,19 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { if src == nil { return internal.ContextClient(ctx) } + cc := internal.ContextClient(ctx) return &http.Client{ Transport: &Transport{ - Base: internal.ContextClient(ctx).Transport, + Base: cc.Transport, Source: ReuseTokenSource(nil, src), }, + CheckRedirect: cc.CheckRedirect, + Jar: cc.Jar, + Timeout: cc.Timeout, } } -// ReuseTokenSource returns a TokenSource which repeatedly returns the +// ReuseTokenSource returns a [TokenSource] which repeatedly returns the // same token as long as it's valid, starting with t. // When its cached token is invalid, a new token is obtained from src. // @@ -372,10 +375,10 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { // (such as a file on disk) between runs of a program, rather than // obtaining new tokens unnecessarily. // -// The initial token t may be nil, in which case the TokenSource is +// The initial token t may be nil, in which case the [TokenSource] is // wrapped in a caching version if it isn't one already. This also // means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. +// [TokenSource] without adverse effects. func ReuseTokenSource(t *Token, src TokenSource) TokenSource { // Don't wrap a reuseTokenSource in itself. That would work, // but cause an unnecessary number of mutex operations. @@ -393,8 +396,8 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { } } -// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the -// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// ReuseTokenSourceWithExpiry returns a [TokenSource] that acts in the same manner as the +// [TokenSource] returned by [ReuseTokenSource], except the expiry buffer is // configurable. The expiration time of a token is calculated as // t.Expiry.Add(-earlyExpiry). func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go index 6a95da975c..cea8374d51 100644 --- a/vendor/golang.org/x/oauth2/pkce.go +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -1,6 +1,7 @@ // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. + package oauth2 import ( @@ -20,9 +21,9 @@ const ( // This follows recommendations in RFC 7636. // // A fresh verifier should be generated for each authorization. -// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange -// (or Config.DeviceAccessToken). +// The resulting verifier should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] +// with [S256ChallengeOption], and to [Config.Exchange] or [Config.DeviceAccessToken] +// with [VerifierOption]. func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be // used to create a 32-octet sequence. The octet sequence is then @@ -36,22 +37,22 @@ func GenerateVerifier() string { return base64.RawURLEncoding.EncodeToString(data) } -// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be -// passed to Config.Exchange or Config.DeviceAccessToken only. +// VerifierOption returns a PKCE code verifier [AuthCodeOption]. 
It should only be +// passed to [Config.Exchange] or [Config.DeviceAccessToken]. func VerifierOption(verifier string) AuthCodeOption { return setParam{k: codeVerifierKey, v: verifier} } // S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. // -// Prefer to use S256ChallengeOption where possible. +// Prefer to use [S256ChallengeOption] where possible. func S256ChallengeFromVerifier(verifier string) string { sha := sha256.Sum256([]byte(verifier)) return base64.RawURLEncoding.EncodeToString(sha[:]) } // S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth +// method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] // only. func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 109997d77c..239ec32962 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -44,7 +44,7 @@ type Token struct { // Expiry is the optional expiration time of the access token. // - // If zero, TokenSource implementations will reuse the same + // If zero, [TokenSource] implementations will reuse the same // token forever and RefreshToken or equivalent // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` @@ -58,7 +58,7 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. - raw interface{} + raw any // expiryDelta is used to calculate when a token is considered // expired, by subtracting from Expiry. If zero, defaultExpiryDelta @@ -86,16 +86,16 @@ func (t *Token) Type() string { // SetAuthHeader sets the Authorization header to r using the access // token in t. // -// This method is unnecessary when using Transport or an HTTP Client +// This method is unnecessary when using [Transport] or an HTTP Client // returned by this package. func (t *Token) SetAuthHeader(r *http.Request) { r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) } -// WithExtra returns a new Token that's a clone of t, but using the +// WithExtra returns a new [Token] that's a clone of t, but using the // provided raw extra map. This is only intended for use by packages // implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { +func (t *Token) WithExtra(extra any) *Token { t2 := new(Token) *t2 = *t t2.raw = extra @@ -105,8 +105,8 @@ func (t *Token) WithExtra(extra interface{}) *Token { // Extra returns an extra field. // Extra fields are key-value pairs returned by the server as a // part of the token retrieval response. -func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { +func (t *Token) Extra(key string) any { + if raw, ok := t.raw.(map[string]any); ok { return raw[key] } @@ -163,13 +163,14 @@ func tokenFromInternal(t *internal.Token) *Token { TokenType: t.TokenType, RefreshToken: t.RefreshToken, Expiry: t.Expiry, + ExpiresIn: t.ExpiresIn, raw: t.Raw, } } // retrieveToken takes a *Config and uses that to retrieve an *internal.Token. // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. +// with an error. 
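A hedged sketch of the PKCE flow these doc comments describe, using only the exported helpers shown above (same imports as the previous sketch; state handling and delivery of the authorization code are application-specific):

func authorizeWithPKCE(ctx context.Context, conf *oauth2.Config, readCode func(authURL string) string) (*oauth2.Token, error) {
	verifier := oauth2.GenerateVerifier()                                      // one fresh verifier per authorization
	authURL := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier)) // challenge goes to the auth endpoint
	code := readCode(authURL)                                                  // obtain the authorization code out of band
	return conf.Exchange(ctx, code, oauth2.VerifierOption(verifier))           // the same verifier goes to the token endpoint
}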
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index 90657915fb..8bbebbac9e 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -11,12 +11,12 @@ import ( "sync" ) -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. +// Transport is an [http.RoundTripper] that makes OAuth 2.0 HTTP requests, +// wrapping a base [http.RoundTripper] and adding an Authorization header +// with a token from the supplied [TokenSource]. // // Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. +// higher-level [Config.Client] method instead. type Transport struct { // Source supplies the token to add to outgoing requests' // Authorization headers. @@ -47,7 +47,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { return nil, err } - req2 := cloneRequest(req) // per RoundTripper contract + req2 := req.Clone(req.Context()) token.SetAuthHeader(req2) // req.Body is assumed to be closed by the base RoundTripper. @@ -73,17 +73,3 @@ func (t *Transport) base() http.RoundTripper { } return http.DefaultTransport } - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 93a798ab63..794b2e32bf 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -85,7 +85,7 @@ func (lim *Limiter) Burst() int { // TokensAt returns the number of tokens available at time t. func (lim *Limiter) TokensAt(t time.Time) float64 { lim.mu.Lock() - _, tokens := lim.advance(t) // does not mutate lim + tokens := lim.advance(t) // does not mutate lim lim.mu.Unlock() return tokens } @@ -186,7 +186,7 @@ func (r *Reservation) CancelAt(t time.Time) { return } // advance time to now - t, tokens := r.lim.advance(t) + tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { @@ -307,7 +307,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -324,7 +324,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -347,7 +347,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) } } - t, tokens := lim.advance(t) + tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. 
tokens -= float64(n) @@ -380,10 +380,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) return r } -// advance calculates and returns an updated state for lim resulting from the passage of time. +// advance calculates and returns an updated number of tokens for lim +// resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newTokens float64) { last := lim.last if t.Before(last) { last = t @@ -396,7 +397,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { if burst := float64(lim.burst); tokens > burst { tokens = burst } - return t, tokens + return tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration @@ -405,8 +406,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration { if limit <= 0 { return InfDuration } - seconds := tokens / float64(limit) - return time.Duration(float64(time.Second) * seconds) + + duration := (tokens / float64(limit)) * float64(time.Second) + + // Cap the duration to the maximum representable int64 value, to avoid overflow. + if duration > float64(math.MaxInt64) { + return InfDuration + } + + return time.Duration(duration) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go index 6ba99ddb67..9b83932692 100644 --- a/vendor/golang.org/x/time/rate/sometimes.go +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -61,7 +61,9 @@ func (s *Sometimes) Do(f func()) { (s.Every > 0 && s.count%s.Every == 0) || (s.Interval > 0 && time.Since(s.last) >= s.Interval) { f() - s.last = time.Now() + if s.Interval > 0 { + s.last = time.Now() + } } s.count++ } diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index e942bc983e..743bfb81d6 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -371,7 +371,31 @@ func ConsumeVarint(b []byte) (v uint64, n int) { func SizeVarint(v uint64) int { // This computes 1 + (bits.Len64(v)-1)/7. // 9/64 is a good enough approximation of 1/7 - return int(9*uint32(bits.Len64(v))+64) / 64 + // + // The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT + // instruction, which is very fast on CPUs from the last few years. The + // specific way of expressing the calculation matches C++ Protobuf, see + // https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang + // optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell). + + // By OR'ing v with 1, we guarantee that v is never 0, without changing the + // result of SizeVarint. LZCNT is not defined for 0, meaning the compiler + // needs to add extra instructions to handle that case. + // + // The Go compiler currently (go1.24.4) does not make use of this knowledge. + // This opportunity (removing the XOR instruction, which handles the 0 case) + // results in a small (1%) performance win across CPU architectures. + // + // Independently of avoiding the 0 case, we need the v |= 1 line because + // it allows the Go compiler to eliminate an extra XCHGL barrier. 
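A worked check of the new formula, purely illustrative:

// v = 300: bits.Len64(v) = 9, so log2value = 8 and (8*9 + 64 + 9) / 64 = 145/64 = 2,
// matching the two bytes a varint needs for a 9-bit value (two 7-bit groups).
// v = 1 (and v = 0, thanks to the v |= 1 below): log2value = 0 and 73/64 = 1.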
+ v |= 1 + + // It would be clearer to write log2value := 63 - uint32(...), but + // writing uint32(...) ^ 63 is much more efficient (-14% ARM, -20% Intel). + // Proof of identity for our value range [0..63]: + // https://go.dev/play/p/Pdn9hEWYakX + log2value := uint32(bits.LeadingZeros64(v)) ^ 63 + return int((log2value*9 + (64 + 9)) / 64) } // AppendFixed32 appends v to b as a little-endian uint32. diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 5a57ef6f3c..04696351ee 100644 Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index bf1aba0e85..7b9f01afb0 100644 --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -9,7 +9,7 @@ import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 - Maximum = descriptorpb.Edition_EDITION_2023 + Maximum = descriptorpb.Edition_EDITION_2024 // MaximumKnown is the maximum edition that is known to Go Protobuf, but not // declared as supported. In other words: end users cannot use it, but diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 688aabe434..dbcf90b871 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -72,9 +72,10 @@ type ( EditionFeatures EditionFeatures } FileL2 struct { - Options func() protoreflect.ProtoMessage - Imports FileImports - Locations SourceLocations + Options func() protoreflect.ProtoMessage + Imports FileImports + OptionImports func() protoreflect.FileImports + Locations SourceLocations } // EditionFeatures is a frequently-instantiated struct, so please take care @@ -126,12 +127,9 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } func (fd *File) Parent() protoreflect.Descriptor { return nil } func (fd *File) Index() int { return 0 } func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } - -// Not exported and just used to reconstruct the original FileDescriptor proto -func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } -func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() @@ -150,6 +148,16 @@ func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatD func (fd *File) ProtoType(protoreflect.FileDescriptor) {} func (fd *File) ProtoInternal(pragma.DoNotImplement) {} +// The next two are not part of the FileDescriptor interface. They are just used to reconstruct +// the original FileDescriptor proto. 
+func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } +func (fd *File) OptionImports() protoreflect.FileImports { + if f := fd.lazyInit().OptionImports; f != nil { + return f() + } + return emptyFiles +} + func (fd *File) lazyInit() *FileL2 { if atomic.LoadUint32(&fd.once) == 0 { fd.lazyInitOnce() @@ -182,9 +190,9 @@ type ( L2 *EnumL2 // protected by fileDesc.once } EnumL1 struct { - eagerValues bool // controls whether EnumL2.Values is already populated - EditionFeatures EditionFeatures + Visibility int32 + eagerValues bool // controls whether EnumL2.Values is already populated } EnumL2 struct { Options func() protoreflect.ProtoMessage @@ -219,6 +227,11 @@ func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit() func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges } func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {} + +// This is not part of the EnumDescriptor interface. It is just used to reconstruct +// the original FileDescriptor proto. +func (ed *Enum) Visibility() int32 { return ed.L1.Visibility } + func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 @@ -244,13 +257,13 @@ type ( L2 *MessageL2 // protected by fileDesc.once } MessageL1 struct { - Enums Enums - Messages Messages - Extensions Extensions - IsMapEntry bool // promoted from google.protobuf.MessageOptions - IsMessageSet bool // promoted from google.protobuf.MessageOptions - + Enums Enums + Messages Messages + Extensions Extensions EditionFeatures EditionFeatures + Visibility int32 + IsMapEntry bool // promoted from google.protobuf.MessageOptions + IsMessageSet bool // promoted from google.protobuf.MessageOptions } MessageL2 struct { Options func() protoreflect.ProtoMessage @@ -319,6 +332,11 @@ func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions } func (md *Message) ProtoType(protoreflect.MessageDescriptor) {} func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } + +// This is not part of the MessageDescriptor interface. It is just used to reconstruct +// the original FileDescriptor proto. 
+func (md *Message) Visibility() int32 { return md.L1.Visibility } + func (md *Message) lazyInit() *MessageL2 { md.L0.ParentFile.lazyInit() // implicitly initializes L2 return md.L2 diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index d2f549497e..e91860f5a2 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -284,6 +284,13 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl case genid.EnumDescriptorProto_Value_field_number: numValues++ } + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Visibility_field_number: + ed.L1.Visibility = int32(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -365,6 +372,13 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor md.unmarshalSeedOptions(v) } prevField = num + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.DescriptorProto_Visibility_field_number: + md.L1.Visibility = int32(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index d4c94458bd..dd31faaeb0 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -134,6 +134,7 @@ func (fd *File) unmarshalFull(b []byte) { var enumIdx, messageIdx, extensionIdx, serviceIdx int var rawOptions []byte + var optionImports []string fd.L2 = new(FileL2) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -157,6 +158,8 @@ func (fd *File) unmarshalFull(b []byte) { imp = PlaceholderFile(path) } fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp}) + case genid.FileDescriptorProto_OptionDependency_field_number: + optionImports = append(optionImports, sb.MakeString(v)) case genid.FileDescriptorProto_EnumType_field_number: fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ @@ -178,6 +181,23 @@ func (fd *File) unmarshalFull(b []byte) { } } fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions) + if len(optionImports) > 0 { + var imps FileImports + var once sync.Once + fd.L2.OptionImports = func() protoreflect.FileImports { + once.Do(func() { + imps = make(FileImports, len(optionImports)) + for i, path := range optionImports { + imp, _ := fd.builder.FileRegistry.FindFileByPath(path) + if imp == nil { + imp = PlaceholderFile(path) + } + imps[i] = protoreflect.FileImport{FileDescriptor: imp} + } + }) + return &imps + } + } } func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 10132c9b38..66ba906806 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -13,8 +13,10 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" ) -var defaultsCache = make(map[Edition]EditionFeatures) -var defaultsKeys = []Edition{} +var ( + defaultsCache = make(map[Edition]EditionFeatures) + defaultsKeys = []Edition{} +) func init() { 
unmarshalEditionDefaults(editiondefaults.Defaults) @@ -41,7 +43,7 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { b = b[m:] parent.StripEnumPrefix = int(v) default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) + panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num)) } } return parent @@ -69,8 +71,14 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value case genid.FeatureSet_JsonFormat_field_number: parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + case genid.FeatureSet_EnforceNamingStyle_field_number: + // EnforceNamingStyle is enforced in protoc, languages other than C++ + // are not supposed to do anything with this feature. + case genid.FeatureSet_DefaultSymbolVisibility_field_number: + // DefaultSymbolVisibility is enforced in protoc, runtimes should not + // inspect this value. default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) + panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num)) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -144,7 +152,7 @@ func unmarshalEditionDefaults(b []byte) { _, m := protowire.ConsumeVarint(b) b = b[m:] default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num)) + panic(fmt.Sprintf("unknown field number %d while unmarshalling EditionDefault", num)) } } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go new file mode 100644 index 0000000000..a12ec9791c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go @@ -0,0 +1,33 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import "google.golang.org/protobuf/reflect/protoreflect" + +// UsePresenceForField reports whether the presence bitmap should be used for +// the specified field. +func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + // Oneof fields never use the presence bitmap. + // + // Synthetic oneofs are an exception: Those are used to implement proto3 + // optional fields and hence should follow non-oneof field semantics. + return false, false + + case fd.IsMap(): + // Map-typed fields never use the presence bitmap. + return false, false + + case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: + // Lazy fields always use the presence bitmap (only messages can be lazy). + isLazy := fd.(interface{ IsLazy() bool }).IsLazy() + return isLazy, isLazy + + default: + // If the field has presence, use the presence bitmap. 
+ return fd.HasPresence(), false + } +} diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go index df8f918501..3ceb6fa7f5 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -27,6 +27,7 @@ const ( Api_SourceContext_field_name protoreflect.Name = "source_context" Api_Mixins_field_name protoreflect.Name = "mixins" Api_Syntax_field_name protoreflect.Name = "syntax" + Api_Edition_field_name protoreflect.Name = "edition" Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" @@ -35,6 +36,7 @@ const ( Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" + Api_Edition_field_fullname protoreflect.FullName = "google.protobuf.Api.edition" ) // Field numbers for google.protobuf.Api. @@ -46,6 +48,7 @@ const ( Api_SourceContext_field_number protoreflect.FieldNumber = 5 Api_Mixins_field_number protoreflect.FieldNumber = 6 Api_Syntax_field_number protoreflect.FieldNumber = 7 + Api_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Method. @@ -63,6 +66,7 @@ const ( Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" Method_Options_field_name protoreflect.Name = "options" Method_Syntax_field_name protoreflect.Name = "syntax" + Method_Edition_field_name protoreflect.Name = "edition" Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" @@ -71,6 +75,7 @@ const ( Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" + Method_Edition_field_fullname protoreflect.FullName = "google.protobuf.Method.edition" ) // Field numbers for google.protobuf.Method. @@ -82,6 +87,7 @@ const ( Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 Method_Options_field_number protoreflect.FieldNumber = 6 Method_Syntax_field_number protoreflect.FieldNumber = 7 + Method_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Mixin. diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index f30ab6b586..950a6a325a 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -34,6 +34,19 @@ const ( Edition_EDITION_MAX_enum_value = 2147483647 ) +// Full and short names for google.protobuf.SymbolVisibility. +const ( + SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility" + SymbolVisibility_enum_name = "SymbolVisibility" +) + +// Enum values for google.protobuf.SymbolVisibility. +const ( + SymbolVisibility_VISIBILITY_UNSET_enum_value = 0 + SymbolVisibility_VISIBILITY_LOCAL_enum_value = 1 + SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2 +) + // Names for google.protobuf.FileDescriptorSet. 
const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -65,6 +78,7 @@ const ( FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency" FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" FileDescriptorProto_Service_field_name protoreflect.Name = "service" @@ -79,6 +93,7 @@ const ( FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency" FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" @@ -96,6 +111,7 @@ const ( FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15 FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 @@ -124,6 +140,7 @@ const ( DescriptorProto_Options_field_name protoreflect.Name = "options" DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + DescriptorProto_Visibility_field_name protoreflect.Name = "visibility" DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" @@ -135,6 +152,7 @@ const ( DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" + DescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.visibility" ) // Field numbers for google.protobuf.DescriptorProto. @@ -149,6 +167,7 @@ const ( DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 + DescriptorProto_Visibility_field_number protoreflect.FieldNumber = 11 ) // Names for google.protobuf.DescriptorProto.ExtensionRange. 
@@ -388,12 +407,14 @@ const ( EnumDescriptorProto_Options_field_name protoreflect.Name = "options" EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + EnumDescriptorProto_Visibility_field_name protoreflect.Name = "visibility" EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" + EnumDescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility" ) // Field numbers for google.protobuf.EnumDescriptorProto. @@ -403,6 +424,7 @@ const ( EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 + EnumDescriptorProto_Visibility_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. @@ -1008,29 +1030,35 @@ const ( // Field names for google.protobuf.FeatureSet. const ( - FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" - FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" - FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" - FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" - FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" - FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" - - FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" - FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" - FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" - FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" - FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" - FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style" + FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = 
"google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" + FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style" + FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility" ) // Field numbers for google.protobuf.FeatureSet. const ( - FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 - FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 - FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 - FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 - FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 - FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 + FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7 + FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8 ) // Full and short names for google.protobuf.FeatureSet.FieldPresence. @@ -1112,6 +1140,40 @@ const ( FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 ) +// Full and short names for google.protobuf.FeatureSet.EnforceNamingStyle. +const ( + FeatureSet_EnforceNamingStyle_enum_fullname = "google.protobuf.FeatureSet.EnforceNamingStyle" + FeatureSet_EnforceNamingStyle_enum_name = "EnforceNamingStyle" +) + +// Enum values for google.protobuf.FeatureSet.EnforceNamingStyle. +const ( + FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN_enum_value = 0 + FeatureSet_STYLE2024_enum_value = 1 + FeatureSet_STYLE_LEGACY_enum_value = 2 +) + +// Names for google.protobuf.FeatureSet.VisibilityFeature. +const ( + FeatureSet_VisibilityFeature_message_name protoreflect.Name = "VisibilityFeature" + FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature" +) + +// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility. +const ( + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility" + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name = "DefaultSymbolVisibility" +) + +// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility. +const ( + FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0 + FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value = 1 + FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value = 2 + FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value = 3 + FeatureSet_VisibilityFeature_STRICT_enum_value = 4 +) + // Names for google.protobuf.FeatureSetDefaults. 
const ( FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go index 41c1f74ef8..bdad12a9bb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" @@ -80,7 +81,7 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf // permit us to skip over definitely-unset fields at marshal time. var hasPresence bool - hasPresence, cf.isLazy = usePresenceForField(si, fd) + hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd) if hasPresence { cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd) diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go index dd55e8e009..5a439daacb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -11,6 +11,7 @@ import ( "strings" "sync/atomic" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -53,7 +54,7 @@ func opaqueInitHook(mi *MessageInfo) bool { fd := fds.Get(i) fs := si.fieldsByNumber[fd.Number()] var fi fieldInfo - usePresence, _ := usePresenceForField(si, fd) + usePresence, _ := filedesc.UsePresenceForField(fd) switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): @@ -343,17 +344,15 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn if p.IsNil() { return false } - sp := p.Apply(fieldOffset).AtomicGetPointer() - if sp.IsNil() { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { return false } - rv := sp.AsValueOf(fs.Type.Elem()) return rv.Elem().Len() > 0 }, clear: func(p pointer) { - sp := p.Apply(fieldOffset).AtomicGetPointer() - if !sp.IsNil() { - rv := sp.AsValueOf(fs.Type.Elem()) + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if !rv.IsNil() { rv.Elem().Set(reflect.Zero(rv.Type().Elem())) } }, @@ -361,11 +360,10 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn if p.IsNil() { return conv.Zero() } - sp := p.Apply(fieldOffset).AtomicGetPointer() - if sp.IsNil() { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { return conv.Zero() } - rv := sp.AsValueOf(fs.Type.Elem()) if rv.Elem().Len() == 0 { return conv.Zero() } @@ -598,30 +596,3 @@ func (mi *MessageInfo) clearPresent(p pointer, index uint32) { func (mi *MessageInfo) present(p pointer, index uint32) bool { return p.Apply(mi.presenceOffset).PresenceInfo().Present(index) } - -// usePresenceForField implements the somewhat intricate logic of when -// the presence bitmap is used for a field. The main logic is that a -// field that is optional or that can be lazy will use the presence -// bit, but for proto2, also maps have a presence bit. 
It also records -// if the field can ever be lazy, which is true if we have a -// lazyOffset and the field is a message or a slice of messages. A -// field that is lazy will always need a presence bit. Oneofs are not -// lazy and do not use presence, unless they are a synthetic oneof, -// which is a proto3 optional field. For proto3 optionals, we use the -// presence and they can also be lazy when applicable (a message). -func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { - hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy() - - // Non-oneof scalar fields with explicit field presence use the presence array. - usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic()) - switch { - case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - return false, false - case fd.IsMap(): - return false, false - case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: - return hasLazyField, hasLazyField - default: - return usesPresenceArray || (hasLazyField && fd.HasPresence()), false - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go index 914cb1deda..443afe81cd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/presence.go +++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go @@ -32,9 +32,6 @@ func (p presence) toElem(num uint32) (ret *uint32) { // Present checks for the presence of a specific field number in a presence set. func (p presence) Present(num uint32) bool { - if p.P == nil { - return false - } return Export{}.Present(p.toElem(num), num) } diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go similarity index 99% rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index 1ffddf6877..42dd6f70c6 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.21 - package strs import ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go deleted file mode 100644 index 832a7988f1..0000000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package strs - -import ( - "unsafe" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } -) - -// UnsafeString returns an unsafe string reference of b. -// The caller must treat the input slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user -// unless the input slice is provably immutable. 
-func UnsafeString(b []byte) (s string) { - src := (*sliceHeader)(unsafe.Pointer(&b)) - dst := (*stringHeader)(unsafe.Pointer(&s)) - dst.Data = src.Data - dst.Len = src.Len - return s -} - -// UnsafeBytes returns an unsafe bytes slice reference of s. -// The caller must treat returned slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user. -func UnsafeBytes(s string) (b []byte) { - src := (*stringHeader)(unsafe.Pointer(&s)) - dst := (*sliceHeader)(unsafe.Pointer(&b)) - dst.Data = src.Data - dst.Len = src.Len - dst.Cap = src.Len - return b -} - -// Builder builds a set of strings with shared lifetime. -// This differs from strings.Builder, which is for building a single string. -type Builder struct { - buf []byte -} - -// AppendFullName is equivalent to protoreflect.FullName.Append, -// but optimized for large batches where each name has a shared lifetime. -func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { - n := len(prefix) + len(".") + len(name) - if len(prefix) == 0 { - n -= len(".") - } - sb.grow(n) - sb.buf = append(sb.buf, prefix...) - sb.buf = append(sb.buf, '.') - sb.buf = append(sb.buf, name...) - return protoreflect.FullName(sb.last(n)) -} - -// MakeString is equivalent to string(b), but optimized for large batches -// with a shared lifetime. -func (sb *Builder) MakeString(b []byte) string { - sb.grow(len(b)) - sb.buf = append(sb.buf, b...) - return sb.last(len(b)) -} - -func (sb *Builder) grow(n int) { - if cap(sb.buf)-len(sb.buf) >= n { - return - } - - // Unlike strings.Builder, we do not need to copy over the contents - // of the old buffer since our builder provides no API for - // retrieving previously created strings. - sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) -} - -func (sb *Builder) last(n int) string { - return UnsafeString(sb.buf[len(sb.buf)-n:]) -} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 01efc33030..77de0f238c 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 5 + Patch = 10 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go index 3c6fe57807..ef55b97dde 100644 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -59,6 +59,12 @@ func Clone(m Message) Message { return dst.Interface() } +// CloneOf returns a deep copy of m. If the top-level message is invalid, +// it returns an invalid message as well. +func CloneOf[M Message](m M) M { + return Clone(m).(M) +} + // mergeOptions provides a namespace for merge functions, and can be // exported in the future if we add user-visible merge options. 
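A quick usage sketch for the new generic CloneOf; durationpb serves only as an example message type:

import (
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func cloneDuration(d *durationpb.Duration) *durationpb.Duration {
	// CloneOf preserves the concrete message type, so no type assertion is
	// needed, unlike proto.Clone(d).(*durationpb.Duration).
	return proto.CloneOf(d)
}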
type mergeOptions struct{} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 823dbf3ba6..9196288e4a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -152,6 +152,28 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot imp := &f.L2.Imports[i] imps.importPublic(imp.Imports()) } + if len(fd.GetOptionDependency()) > 0 { + optionImports := make(filedesc.FileImports, len(fd.GetOptionDependency())) + for i, path := range fd.GetOptionDependency() { + imp := &optionImports[i] + f, err := r.FindFileByPath(path) + if err == protoregistry.NotFound { + // We always allow option imports to be unresolvable. + f = filedesc.PlaceholderFile(path) + } else if err != nil { + return nil, errors.New("could not resolve import %q: %v", path, err) + } + imp.FileDescriptor = f + + if imps[imp.Path()] { + return nil, errors.New("already imported %q", path) + } + imps[imp.Path()] = true + } + f.L2.OptionImports = func() protoreflect.FileImports { + return &optionImports + } + } // Handle source locations. f.L2.Locations.File = f diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 9da34998b1..c826ad0430 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -29,6 +29,7 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt e.L2.Options = func() protoreflect.ProtoMessage { return opts } } e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) + e.L1.Visibility = int32(ed.GetVisibility()) for _, s := range ed.GetReservedName() { e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) } @@ -70,6 +71,7 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt return nil, err } m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) + m.L1.Visibility = int32(md.GetVisibility()) if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index 9b880aa8c9..6f91074e36 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -70,16 +70,27 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } + desc := file + if fileImportDesc, ok := file.(protoreflect.FileImport); ok { + desc = fileImportDesc.FileDescriptor + } if file.Syntax() == protoreflect.Editions { - desc := file - if fileImportDesc, ok := file.(protoreflect.FileImport); ok { - desc = fileImportDesc.FileDescriptor - } - if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok { p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum() } } + type hasOptionImports interface { + OptionImports() protoreflect.FileImports + } + if opts, ok := desc.(hasOptionImports); ok { + if optionImports := opts.OptionImports(); optionImports.Len() > 0 { + 
optionDeps := make([]string, optionImports.Len()) + for i := range optionImports.Len() { + optionDeps[i] = optionImports.Get(i).Path() + } + p.OptionDependency = optionDeps + } + } return p } @@ -123,6 +134,14 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des for i, names := 0, message.ReservedNames(); i < names.Len(); i++ { p.ReservedName = append(p.ReservedName, string(names.Get(i))) } + type hasVisibility interface { + Visibility() int32 + } + if vis, ok := message.(hasVisibility); ok { + if visibility := vis.Visibility(); visibility > 0 { + p.Visibility = descriptorpb.SymbolVisibility(visibility).Enum() + } + } return p } @@ -216,6 +235,14 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ { p.ReservedName = append(p.ReservedName, string(names.Get(i))) } + type hasVisibility interface { + Visibility() int32 + } + if vis, ok := enum.(hasVisibility); ok { + if visibility := vis.Visibility(); visibility > 0 { + p.Visibility = descriptorpb.SymbolVisibility(visibility).Enum() + } + } return p } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index ea154eec44..730331e666 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -21,6 +21,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "public_dependency", nil) case 11: b = p.appendRepeatedField(b, "weak_dependency", nil) + case 15: + b = p.appendRepeatedField(b, "option_dependency", nil) case 4: b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) case 5: @@ -66,6 +68,8 @@ func (p *SourcePath) appendDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange) case 10: b = p.appendRepeatedField(b, "reserved_name", nil) + case 11: + b = p.appendSingularField(b, "visibility", nil) } return b } @@ -85,6 +89,8 @@ func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) case 5: b = p.appendRepeatedField(b, "reserved_name", nil) + case 6: + b = p.appendSingularField(b, "visibility", nil) } return b } @@ -398,6 +404,10 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte { b = p.appendSingularField(b, "message_encoding", nil) case 6: b = p.appendSingularField(b, "json_format", nil) + case 7: + b = p.appendSingularField(b, "enforce_naming_style", nil) + case 8: + b = p.appendSingularField(b, "default_symbol_visibility", nil) } return b } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go similarity index 99% rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go index 479527b58d..fe17f37220 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build go1.21 - package protoreflect import ( diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go deleted file mode 100644 index 0015fcb35d..0000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package protoreflect - -import ( - "unsafe" - - "google.golang.org/protobuf/internal/pragma" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } - ifaceHeader struct { - Type unsafe.Pointer - Data unsafe.Pointer - } -) - -var ( - nilType = typeOf(nil) - boolType = typeOf(*new(bool)) - int32Type = typeOf(*new(int32)) - int64Type = typeOf(*new(int64)) - uint32Type = typeOf(*new(uint32)) - uint64Type = typeOf(*new(uint64)) - float32Type = typeOf(*new(float32)) - float64Type = typeOf(*new(float64)) - stringType = typeOf(*new(string)) - bytesType = typeOf(*new([]byte)) - enumType = typeOf(*new(EnumNumber)) -) - -// typeOf returns a pointer to the Go type information. -// The pointer is comparable and equal if and only if the types are identical. -func typeOf(t any) unsafe.Pointer { - return (*ifaceHeader)(unsafe.Pointer(&t)).Type -} - -// value is a union where only one type can be represented at a time. -// The struct is 24B large on 64-bit systems and requires the minimum storage -// necessary to represent each possible type. -// -// The Go GC needs to be able to scan variables containing pointers. -// As such, pointers and non-pointers cannot be intermixed. -type value struct { - pragma.DoNotCompare // 0B - - // typ stores the type of the value as a pointer to the Go type. - typ unsafe.Pointer // 8B - - // ptr stores the data pointer for a String, Bytes, or interface value. - ptr unsafe.Pointer // 8B - - // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or - // Enum value as a raw uint64. - // - // It is also used to store the length of a String or Bytes value; - // the capacity is ignored. 
- num uint64 // 8B -} - -func valueOfString(v string) Value { - p := (*stringHeader)(unsafe.Pointer(&v)) - return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfBytes(v []byte) Value { - p := (*sliceHeader)(unsafe.Pointer(&v)) - return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfIface(v any) Value { - p := (*ifaceHeader)(unsafe.Pointer(&v)) - return Value{typ: p.Type, ptr: p.Data} -} - -func (v Value) getString() (x string) { - *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} - return x -} -func (v Value) getBytes() (x []byte) { - *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} - return x -} -func (v Value) getIface() (x any) { - *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} - return x -} diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index a516337674..4eacb523c3 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -151,6 +151,70 @@ func (Edition) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} } +// Describes the 'visibility' of a symbol with respect to the proto import +// system. Symbols can only be imported when the visibility rules do not prevent +// it (ex: local symbols cannot be imported). Visibility modifiers can only set +// on `message` and `enum` as they are the only types available to be referenced +// from other files. +type SymbolVisibility int32 + +const ( + SymbolVisibility_VISIBILITY_UNSET SymbolVisibility = 0 + SymbolVisibility_VISIBILITY_LOCAL SymbolVisibility = 1 + SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2 +) + +// Enum value maps for SymbolVisibility. +var ( + SymbolVisibility_name = map[int32]string{ + 0: "VISIBILITY_UNSET", + 1: "VISIBILITY_LOCAL", + 2: "VISIBILITY_EXPORT", + } + SymbolVisibility_value = map[string]int32{ + "VISIBILITY_UNSET": 0, + "VISIBILITY_LOCAL": 1, + "VISIBILITY_EXPORT": 2, + } +) + +func (x SymbolVisibility) Enum() *SymbolVisibility { + p := new(SymbolVisibility) + *p = x + return p +} + +func (x SymbolVisibility) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() +} + +func (SymbolVisibility) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[1] +} + +func (x SymbolVisibility) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *SymbolVisibility) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = SymbolVisibility(num) + return nil +} + +// Deprecated: Use SymbolVisibility.Descriptor instead. +func (SymbolVisibility) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} +} + // The verification state of the extension range. 
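A small hedged sketch of how the new SymbolVisibility enum is carried on a descriptor proto in this version of descriptorpb (the message name is a placeholder):

// Mark a message as local so it cannot be imported by other files.
var localMsg = &descriptorpb.DescriptorProto{
	Name:       proto.String("InternalOnly"), // placeholder name
	Visibility: descriptorpb.SymbolVisibility_VISIBILITY_LOCAL.Enum(),
}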
type ExtensionRangeOptions_VerificationState int32 @@ -183,11 +247,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string { } func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { @@ -299,11 +363,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -362,11 +426,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -423,11 +487,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -489,11 +553,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -551,11 +615,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -611,11 +675,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() 
protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -694,11 +758,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -756,11 +820,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[10] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -818,11 +882,11 @@ func (x FeatureSet_FieldPresence) String() string { } func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() } func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[10] + return &file_google_protobuf_descriptor_proto_enumTypes[11] } func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { @@ -877,11 +941,11 @@ func (x FeatureSet_EnumType) String() string { } func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() } func (FeatureSet_EnumType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[11] + return &file_google_protobuf_descriptor_proto_enumTypes[12] } func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { @@ -936,11 +1000,11 @@ func (x FeatureSet_RepeatedFieldEncoding) String() string { } func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() } func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[12] + return &file_google_protobuf_descriptor_proto_enumTypes[13] } func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { @@ -995,11 +1059,11 @@ func (x FeatureSet_Utf8Validation) String() string { } func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() + return 
file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() } func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[13] + return &file_google_protobuf_descriptor_proto_enumTypes[14] } func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { @@ -1054,11 +1118,11 @@ func (x FeatureSet_MessageEncoding) String() string { } func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() } func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[14] + return &file_google_protobuf_descriptor_proto_enumTypes[15] } func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { @@ -1113,11 +1177,11 @@ func (x FeatureSet_JsonFormat) String() string { } func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[15] + return &file_google_protobuf_descriptor_proto_enumTypes[16] } func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { @@ -1139,6 +1203,136 @@ func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} } +type FeatureSet_EnforceNamingStyle int32 + +const ( + FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN FeatureSet_EnforceNamingStyle = 0 + FeatureSet_STYLE2024 FeatureSet_EnforceNamingStyle = 1 + FeatureSet_STYLE_LEGACY FeatureSet_EnforceNamingStyle = 2 +) + +// Enum value maps for FeatureSet_EnforceNamingStyle. +var ( + FeatureSet_EnforceNamingStyle_name = map[int32]string{ + 0: "ENFORCE_NAMING_STYLE_UNKNOWN", + 1: "STYLE2024", + 2: "STYLE_LEGACY", + } + FeatureSet_EnforceNamingStyle_value = map[string]int32{ + "ENFORCE_NAMING_STYLE_UNKNOWN": 0, + "STYLE2024": 1, + "STYLE_LEGACY": 2, + } +) + +func (x FeatureSet_EnforceNamingStyle) Enum() *FeatureSet_EnforceNamingStyle { + p := new(FeatureSet_EnforceNamingStyle) + *p = x + return p +} + +func (x FeatureSet_EnforceNamingStyle) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor() +} + +func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[17] +} + +func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnforceNamingStyle) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnforceNamingStyle(num) + return nil +} + +// Deprecated: Use FeatureSet_EnforceNamingStyle.Descriptor instead. 
+func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6} +} + +type FeatureSet_VisibilityFeature_DefaultSymbolVisibility int32 + +const ( + FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0 + // Default pre-EDITION_2024, all UNSET visibility are export. + FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1 + // All top-level symbols default to export, nested default to local. + FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2 + // All symbols default to local. + FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3 + // All symbols local by default. Nested types cannot be exported. + // With special case caveat for message { enum {} reserved 1 to max; } + // This is the recommended setting for new protos. + FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4 +) + +// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility. +var ( + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{ + 0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN", + 1: "EXPORT_ALL", + 2: "EXPORT_TOP_LEVEL", + 3: "LOCAL_ALL", + 4: "STRICT", + } + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{ + "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0, + "EXPORT_ALL": 1, + "EXPORT_TOP_LEVEL": 2, + "LOCAL_ALL": 3, + "STRICT": 4, + } +) + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility { + p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility) + *p = x + return p +} + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor() +} + +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[18] +} + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num) + return nil +} + +// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead. +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0} +} + // Represents the identified object's effect on the element in the original // .proto file. 
type GeneratedCodeInfo_Annotation_Semantic int32 @@ -1177,11 +1371,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[16] + return &file_google_protobuf_descriptor_proto_enumTypes[19] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -1262,6 +1456,9 @@ type FileDescriptorProto struct { // Indexes of the weak imported files in the dependency list. // For Google-internal migration only. Do not use. WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // Names of files imported by this file purely for the purpose of providing + // option extensions. These are excluded from the dependency list above. + OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"` // All top-level definitions in this file. MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` @@ -1277,8 +1474,14 @@ type FileDescriptorProto struct { // The supported values are "proto2", "proto3", and "editions". // // If `edition` is present, this value must be "editions". + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` // The edition of the proto file. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1349,6 +1552,13 @@ func (x *FileDescriptorProto) GetWeakDependency() []int32 { return nil } +func (x *FileDescriptorProto) GetOptionDependency() []string { + if x != nil { + return x.OptionDependency + } + return nil +} + func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { if x != nil { return x.MessageType @@ -1419,7 +1629,9 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + // Support for `export` and `local` keywords on enums. 
+ Visibility *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1524,6 +1736,13 @@ func (x *DescriptorProto) GetReservedName() []string { return nil } +func (x *DescriptorProto) GetVisibility() SymbolVisibility { + if x != nil && x.Visibility != nil { + return *x.Visibility + } + return SymbolVisibility_VISIBILITY_UNSET +} + type ExtensionRangeOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // The parser stores options it doesn't recognize here. See above. @@ -1836,7 +2055,9 @@ type EnumDescriptorProto struct { ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved enum value names, which may not be reused. A given name may only // be reserved once. - ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + // Support for `export` and `local` keywords on enums. + Visibility *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1906,6 +2127,13 @@ func (x *EnumDescriptorProto) GetReservedName() []string { return nil } +func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility { + if x != nil && x.Visibility != nil { + return *x.Visibility + } + return SymbolVisibility_VISIBILITY_UNSET +} + // Describes a value within an enum. type EnumValueDescriptorProto struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -2212,6 +2440,9 @@ type FileOptions struct { // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. @@ -2482,6 +2713,9 @@ type MessageOptions struct { // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2639,7 +2873,10 @@ type FieldOptions struct { // for accessors, or it will be completely ignored; in the very least, this // is a formalization for deprecating fields. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // DEPRECATED. DO NOT USE! // For Google-internal migration only. Do not use. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. @@ -2648,6 +2885,9 @@ type FieldOptions struct { Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. @@ -2740,6 +2980,7 @@ func (x *FieldOptions) GetDeprecated() bool { return Default_FieldOptions_Deprecated } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FieldOptions) GetWeak() bool { if x != nil && x.Weak != nil { return *x.Weak @@ -2799,6 +3040,9 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { type OneofOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2871,6 +3115,9 @@ type EnumOptions struct { // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2958,6 +3205,9 @@ type EnumValueOptions struct { // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` // Indicate that fields annotated with this enum value should not be printed // out when using debug formats, e.g. when the field contains sensitive @@ -3046,6 +3296,9 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { type ServiceOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations @@ -3124,6 +3377,9 @@ type MethodOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -3303,16 +3559,18 @@ func (x *UninterpretedOption) GetAggregateValue() string { // be designed and implemented to handle this, hopefully before we ever hit a // conflict here. 
type FeatureSet struct { - state protoimpl.MessageState `protogen:"open.v1"` - FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` - EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` - RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` - Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` - MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` - JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` + EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"` + DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSet) Reset() { @@ -3387,6 +3645,20 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { return FeatureSet_JSON_FORMAT_UNKNOWN } +func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle { + if x != nil && x.EnforceNamingStyle != nil { + return *x.EnforceNamingStyle + } + return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN +} + +func (x *FeatureSet) GetDefaultSymbolVisibility() 
FeatureSet_VisibilityFeature_DefaultSymbolVisibility { + if x != nil && x.DefaultSymbolVisibility != nil { + return *x.DefaultSymbolVisibility + } + return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN +} + // A compiled specification for the defaults of a set of features. These // messages are generated from FeatureSet extensions and can be used to seed // feature resolution. The resolution with this object becomes a simple search @@ -4047,6 +4319,42 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +type FeatureSet_VisibilityFeature struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FeatureSet_VisibilityFeature) Reset() { + *x = FeatureSet_VisibilityFeature{} + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FeatureSet_VisibilityFeature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet_VisibilityFeature) ProtoMessage() {} + +func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead. +func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + // A map from every known edition with a unique set of defaults to its // defaults. Not all editions may be contained here. 
For a given edition, // the defaults at the closest matching edition ordered at or before it should @@ -4064,7 +4372,7 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4076,7 +4384,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4212,7 +4520,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4224,7 +4532,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4296,7 +4604,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + mi := &file_google_protobuf_descriptor_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4308,7 +4616,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + mi := &file_google_protobuf_descriptor_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4361,777 +4669,389 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor -var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{ - 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 
0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, - 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, - 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, - 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28, - 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, - 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65, - 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, - 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, - 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, - 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, - 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, - 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, - 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, - 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, - 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, - 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02, - 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, - 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, - 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, - 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, - 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, - 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 
0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, - 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, - 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, - 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, - 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, - 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, - 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, - 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, - 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, - 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 
0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, - 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, - 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, - 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, - 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 
0x6c, 0x61, - 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, - 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, - 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, - 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, - 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, - 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, - 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, - 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x5f, 
0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, - 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, - 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, - 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, - 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, - 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, - 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, - 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, - 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 
0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, - 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, - 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, - 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, - 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, - 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, - 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, - 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, - 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 
0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, - 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, - 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, - 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, - 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, - 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, - 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, - 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 
0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, - 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, - 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, - 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, - 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, - 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, - 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, - 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, - 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 
0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10, - 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, - 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 
0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99, - 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, - 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, - 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 
0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, - 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, - 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, - 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, - 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, - 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, - 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 
0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e, - 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, - 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, - 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12, - 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, - 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, - 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65, - 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2, - 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, - 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, - 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, - 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, - 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, - 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, - 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73, - 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, - 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, - 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, - 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, - 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, - 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, - 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01, - 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 
0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, - 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, - 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, - 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, - 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, - 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, - 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, - 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, - 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, - 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, - 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 
0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, - 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, - 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08, - 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, - 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, - 0x0a, 0x03, 
0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, - 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, - 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, - 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7, - 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43, - 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, - 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, - 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, - 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, - 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, - 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, - 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, - 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, - 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, - 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -}) +const file_google_protobuf_descriptor_proto_rawDesc = "" + + "\n" + + " 
google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" + + "\x11FileDescriptorSet\x128\n" + + "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" + + "\x13FileDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + + "\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" + + "\n" + + "dependency\x18\x03 \x03(\tR\n" + + "dependency\x12+\n" + + "\x11public_dependency\x18\n" + + " \x03(\x05R\x10publicDependency\x12'\n" + + "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" + + "\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" + + "\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" + + "\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" + + "\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" + + "\textension\x18\a \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x126\n" + + "\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" + + "\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" + + "\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" + + "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" + + "\x0fDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" + + "\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" + + "\textension\x18\x06 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x12A\n" + + "\vnested_type\x18\x03 \x03(\v2 .google.protobuf.DescriptorProtoR\n" + + "nestedType\x12A\n" + + "\tenum_type\x18\x04 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12X\n" + + "\x0fextension_range\x18\x05 \x03(\v2/.google.protobuf.DescriptorProto.ExtensionRangeR\x0eextensionRange\x12D\n" + + "\n" + + "oneof_decl\x18\b \x03(\v2%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x129\n" + + "\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" + + "\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" + + "\rreserved_name\x18\n" + + " \x03(\tR\freservedName\x12A\n" + + "\n" + + "visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" + + "visibility\x1az\n" + + "\x0eExtensionRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" + + "\aoptions\x18\x03 \x01(\v2&.google.protobuf.ExtensionRangeOptionsR\aoptions\x1a7\n" + + "\rReservedRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\"\xcc\x04\n" + + "\x15ExtensionRangeOptions\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n" + + "\vdeclaration\x18\x02 \x03(\v22.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\vdeclaration\x127\n" + + "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12m\n" + + "\fverification\x18\x03 \x01(\x0e28.google.protobuf.ExtensionRangeOptions.VerificationState:\n" + + "UNVERIFIEDB\x03\x88\x01\x02R\fverification\x1a\x94\x01\n" + + "\vDeclaration\x12\x16\n" + + "\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n" + + "\tfull_name\x18\x02 \x01(\tR\bfullName\x12\x12\n" + + "\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n" + + "\breserved\x18\x05 \x01(\bR\breserved\x12\x1a\n" + + "\brepeated\x18\x06 
\x01(\bR\brepeatedJ\x04\b\x04\x10\x05\"4\n" + + "\x11VerificationState\x12\x0f\n" + + "\vDECLARATION\x10\x00\x12\x0e\n" + + "\n" + + "UNVERIFIED\x10\x01*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xc1\x06\n" + + "\x14FieldDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06number\x18\x03 \x01(\x05R\x06number\x12A\n" + + "\x05label\x18\x04 \x01(\x0e2+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n" + + "\x04type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n" + + "\ttype_name\x18\x06 \x01(\tR\btypeName\x12\x1a\n" + + "\bextendee\x18\x02 \x01(\tR\bextendee\x12#\n" + + "\rdefault_value\x18\a \x01(\tR\fdefaultValue\x12\x1f\n" + + "\voneof_index\x18\t \x01(\x05R\n" + + "oneofIndex\x12\x1b\n" + + "\tjson_name\x18\n" + + " \x01(\tR\bjsonName\x127\n" + + "\aoptions\x18\b \x01(\v2\x1d.google.protobuf.FieldOptionsR\aoptions\x12'\n" + + "\x0fproto3_optional\x18\x11 \x01(\bR\x0eproto3Optional\"\xb6\x02\n" + + "\x04Type\x12\x0f\n" + + "\vTYPE_DOUBLE\x10\x01\x12\x0e\n" + + "\n" + + "TYPE_FLOAT\x10\x02\x12\x0e\n" + + "\n" + + "TYPE_INT64\x10\x03\x12\x0f\n" + + "\vTYPE_UINT64\x10\x04\x12\x0e\n" + + "\n" + + "TYPE_INT32\x10\x05\x12\x10\n" + + "\fTYPE_FIXED64\x10\x06\x12\x10\n" + + "\fTYPE_FIXED32\x10\a\x12\r\n" + + "\tTYPE_BOOL\x10\b\x12\x0f\n" + + "\vTYPE_STRING\x10\t\x12\x0e\n" + + "\n" + + "TYPE_GROUP\x10\n" + + "\x12\x10\n" + + "\fTYPE_MESSAGE\x10\v\x12\x0e\n" + + "\n" + + "TYPE_BYTES\x10\f\x12\x0f\n" + + "\vTYPE_UINT32\x10\r\x12\r\n" + + "\tTYPE_ENUM\x10\x0e\x12\x11\n" + + "\rTYPE_SFIXED32\x10\x0f\x12\x11\n" + + "\rTYPE_SFIXED64\x10\x10\x12\x0f\n" + + "\vTYPE_SINT32\x10\x11\x12\x0f\n" + + "\vTYPE_SINT64\x10\x12\"C\n" + + "\x05Label\x12\x12\n" + + "\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n" + + "\x0eLABEL_REPEATED\x10\x03\x12\x12\n" + + "\x0eLABEL_REQUIRED\x10\x02\"c\n" + + "\x14OneofDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x127\n" + + "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" + + "\x13EnumDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12?\n" + + "\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" + + "\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" + + "\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" + + "\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" + + "\n" + + "visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" + + "visibility\x1a;\n" + + "\x11EnumReservedRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" + + "\x18EnumValueDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" + + "\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" + + "\x16ServiceDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12>\n" + + "\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" + + "\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" + + "\x15MethodDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" + + "\n" + + "input_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n" + + "\voutput_type\x18\x03 \x01(\tR\n" + + "outputType\x128\n" + + "\aoptions\x18\x04 \x01(\v2\x1e.google.protobuf.MethodOptionsR\aoptions\x120\n" + + 
"\x10client_streaming\x18\x05 \x01(\b:\x05falseR\x0fclientStreaming\x120\n" + + "\x10server_streaming\x18\x06 \x01(\b:\x05falseR\x0fserverStreaming\"\xad\t\n" + + "\vFileOptions\x12!\n" + + "\fjava_package\x18\x01 \x01(\tR\vjavaPackage\x120\n" + + "\x14java_outer_classname\x18\b \x01(\tR\x12javaOuterClassname\x125\n" + + "\x13java_multiple_files\x18\n" + + " \x01(\b:\x05falseR\x11javaMultipleFiles\x12D\n" + + "\x1djava_generate_equals_and_hash\x18\x14 \x01(\bB\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n" + + "\x16java_string_check_utf8\x18\x1b \x01(\b:\x05falseR\x13javaStringCheckUtf8\x12S\n" + + "\foptimize_for\x18\t \x01(\x0e2).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\voptimizeFor\x12\x1d\n" + + "\n" + + "go_package\x18\v \x01(\tR\tgoPackage\x125\n" + + "\x13cc_generic_services\x18\x10 \x01(\b:\x05falseR\x11ccGenericServices\x129\n" + + "\x15java_generic_services\x18\x11 \x01(\b:\x05falseR\x13javaGenericServices\x125\n" + + "\x13py_generic_services\x18\x12 \x01(\b:\x05falseR\x11pyGenericServices\x12%\n" + + "\n" + + "deprecated\x18\x17 \x01(\b:\x05falseR\n" + + "deprecated\x12.\n" + + "\x10cc_enable_arenas\x18\x1f \x01(\b:\x04trueR\x0eccEnableArenas\x12*\n" + + "\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n" + + "\x10csharp_namespace\x18% \x01(\tR\x0fcsharpNamespace\x12!\n" + + "\fswift_prefix\x18' \x01(\tR\vswiftPrefix\x12(\n" + + "\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n" + + "\rphp_namespace\x18) \x01(\tR\fphpNamespace\x124\n" + + "\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n" + + "\fruby_package\x18- \x01(\tR\vrubyPackage\x127\n" + + "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n" + + "\fOptimizeMode\x12\t\n" + + "\x05SPEED\x10\x01\x12\r\n" + + "\tCODE_SIZE\x10\x02\x12\x10\n" + + "\fLITE_RUNTIME\x10\x03*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b*\x10+J\x04\b&\x10'R\x14php_generic_services\"\xf4\x03\n" + + "\x0eMessageOptions\x12<\n" + + "\x17message_set_wire_format\x18\x01 \x01(\b:\x05falseR\x14messageSetWireFormat\x12L\n" + + "\x1fno_standard_descriptor_accessor\x18\x02 \x01(\b:\x05falseR\x1cnoStandardDescriptorAccessor\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12\x1b\n" + + "\tmap_entry\x18\a \x01(\bR\bmapEntry\x12V\n" + + "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + + "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + + "\"\xa1\r\n" + + "\fFieldOptions\x12A\n" + + "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" + + "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" + + "\x06jstype\x18\x06 \x01(\x0e2$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n" + + "\x04lazy\x18\x05 \x01(\b:\x05falseR\x04lazy\x12.\n" + + "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12\x1d\n" + + "\x04weak\x18\n" + + " \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" + + "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" + + "\tretention\x18\x11 
\x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" + + "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" + + "\x10edition_defaults\x18\x14 \x03(\v2,.google.protobuf.FieldOptions.EditionDefaultR\x0feditionDefaults\x127\n" + + "\bfeatures\x18\x15 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12U\n" + + "\x0ffeature_support\x18\x16 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n" + + "\x0eEditionDefault\x122\n" + + "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n" + + "\x0eFeatureSupport\x12G\n" + + "\x12edition_introduced\x18\x01 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionIntroduced\x12G\n" + + "\x12edition_deprecated\x18\x02 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionDeprecated\x12/\n" + + "\x13deprecation_warning\x18\x03 \x01(\tR\x12deprecationWarning\x12A\n" + + "\x0fedition_removed\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eeditionRemoved\"/\n" + + "\x05CType\x12\n" + + "\n" + + "\x06STRING\x10\x00\x12\b\n" + + "\x04CORD\x10\x01\x12\x10\n" + + "\fSTRING_PIECE\x10\x02\"5\n" + + "\x06JSType\x12\r\n" + + "\tJS_NORMAL\x10\x00\x12\r\n" + + "\tJS_STRING\x10\x01\x12\r\n" + + "\tJS_NUMBER\x10\x02\"U\n" + + "\x0fOptionRetention\x12\x15\n" + + "\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n" + + "\x11RETENTION_RUNTIME\x10\x01\x12\x14\n" + + "\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n" + + "\x10OptionTargetType\x12\x17\n" + + "\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n" + + "\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n" + + "\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n" + + "\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n" + + "\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n" + + "\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n" + + "\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n" + + "\x16TARGET_TYPE_ENUM_ENTRY\x10\a\x12\x17\n" + + "\x13TARGET_TYPE_SERVICE\x10\b\x12\x16\n" + + "\x12TARGET_TYPE_METHOD\x10\t*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x12\x10\x13\"\xac\x01\n" + + "\fOneofOptions\x127\n" + + "\bfeatures\x18\x01 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd1\x02\n" + + "\vEnumOptions\x12\x1f\n" + + "\vallow_alias\x18\x02 \x01(\bR\n" + + "allowAlias\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12V\n" + + "&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + + "\bfeatures\x18\a \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x05\x10\x06\"\xd8\x02\n" + + "\x10EnumValueOptions\x12%\n" + + "\n" + + "deprecated\x18\x01 \x01(\b:\x05falseR\n" + + "deprecated\x127\n" + + "\bfeatures\x18\x02 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12(\n" + + "\fdebug_redact\x18\x03 \x01(\b:\x05falseR\vdebugRedact\x12U\n" + + "\x0ffeature_support\x18\x04 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a 
\x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd5\x01\n" + + "\x0eServiceOptions\x127\n" + + "\bfeatures\x18\" \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12%\n" + + "\n" + + "deprecated\x18! \x01(\b:\x05falseR\n" + + "deprecated\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x99\x03\n" + + "\rMethodOptions\x12%\n" + + "\n" + + "deprecated\x18! \x01(\b:\x05falseR\n" + + "deprecated\x12q\n" + + "\x11idempotency_level\x18\" \x01(\x0e2/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x127\n" + + "\bfeatures\x18# \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n" + + "\x10IdempotencyLevel\x12\x17\n" + + "\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n" + + "\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n" + + "\n" + + "IDEMPOTENT\x10\x02*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9a\x03\n" + + "\x13UninterpretedOption\x12A\n" + + "\x04name\x18\x02 \x03(\v2-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n" + + "\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n" + + "\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n" + + "\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n" + + "\fdouble_value\x18\x06 \x01(\x01R\vdoubleValue\x12!\n" + + "\fstring_value\x18\a \x01(\fR\vstringValue\x12'\n" + + "\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" + + "\bNamePart\x12\x1b\n" + + "\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" + + "\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" + + "\n" + + "FeatureSet\x12\x91\x01\n" + + "\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" + + "\tenum_type\x18\x02 \x01(\x0e2$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\v\x12\x06CLOSED\x18\x84\a\xa2\x01\t\x12\x04OPEN\x18\xe7\a\xb2\x01\x03\b\xe8\aR\benumType\x12\x98\x01\n" + + "\x17repeated_field_encoding\x18\x03 \x01(\x0e21.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPANDED\x18\x84\a\xa2\x01\v\x12\x06PACKED\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x15repeatedFieldEncoding\x12~\n" + + "\x0futf8_validation\x18\x04 \x01(\x0e2*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\a\xa2\x01\v\x12\x06VERIFY\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x0eutf8Validation\x12~\n" + + "\x10message_encoding\x18\x05 \x01(\x0e2+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\a\xb2\x01\x03\b\xe8\aR\x0fmessageEncoding\x12\x82\x01\n" + + "\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" + + "\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" + + "jsonFormat\x12\xab\x01\n" + + "\x14enforce_naming_style\x18\a 
\x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" + + "\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" + + "EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" + + "\x11VisibilityFeature\"\x81\x01\n" + + "\x17DefaultSymbolVisibility\x12%\n" + + "!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" + + "\n" + + "EXPORT_ALL\x10\x01\x12\x14\n" + + "\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" + + "\tLOCAL_ALL\x10\x03\x12\n" + + "\n" + + "\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" + + "\rFieldPresence\x12\x1a\n" + + "\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" + + "\bEXPLICIT\x10\x01\x12\f\n" + + "\bIMPLICIT\x10\x02\x12\x13\n" + + "\x0fLEGACY_REQUIRED\x10\x03\"7\n" + + "\bEnumType\x12\x15\n" + + "\x11ENUM_TYPE_UNKNOWN\x10\x00\x12\b\n" + + "\x04OPEN\x10\x01\x12\n" + + "\n" + + "\x06CLOSED\x10\x02\"V\n" + + "\x15RepeatedFieldEncoding\x12#\n" + + "\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n" + + "\n" + + "\x06PACKED\x10\x01\x12\f\n" + + "\bEXPANDED\x10\x02\"I\n" + + "\x0eUtf8Validation\x12\x1b\n" + + "\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n" + + "\n" + + "\x06VERIFY\x10\x02\x12\b\n" + + "\x04NONE\x10\x03\"\x04\b\x01\x10\x01\"S\n" + + "\x0fMessageEncoding\x12\x1c\n" + + "\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n" + + "\x0fLENGTH_PREFIXED\x10\x01\x12\r\n" + + "\tDELIMITED\x10\x02\"H\n" + + "\n" + + "JsonFormat\x12\x17\n" + + "\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n" + + "\x05ALLOW\x10\x01\x12\x16\n" + + "\x12LEGACY_BEST_EFFORT\x10\x02\"W\n" + + "\x12EnforceNamingStyle\x12 \n" + + "\x1cENFORCE_NAMING_STYLE_UNKNOWN\x10\x00\x12\r\n" + + "\tSTYLE2024\x10\x01\x12\x10\n" + + "\fSTYLE_LEGACY\x10\x02*\x06\b\xe8\a\x10\x8bN*\x06\b\x8bN\x10\x90N*\x06\b\x90N\x10\x91NJ\x06\b\xe7\a\x10\xe8\a\"\xef\x03\n" + + "\x12FeatureSetDefaults\x12X\n" + + "\bdefaults\x18\x01 \x03(\v2<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\bdefaults\x12A\n" + + "\x0fminimum_edition\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eminimumEdition\x12A\n" + + "\x0fmaximum_edition\x18\x05 \x01(\x0e2\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n" + + "\x18FeatureSetEditionDefault\x122\n" + + "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12N\n" + + "\x14overridable_features\x18\x04 \x01(\v2\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12B\n" + + "\x0efixed_features\x18\x05 \x01(\v2\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03R\bfeatures\"\xb5\x02\n" + + "\x0eSourceCodeInfo\x12D\n" + + "\blocation\x18\x01 \x03(\v2(.google.protobuf.SourceCodeInfo.LocationR\blocation\x1a\xce\x01\n" + + "\bLocation\x12\x16\n" + + "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x16\n" + + "\x04span\x18\x02 \x03(\x05B\x02\x10\x01R\x04span\x12)\n" + + "\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n" + + "\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n" + + "\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n" + + "\x11GeneratedCodeInfo\x12M\n" + + "\n" + + 
"annotation\x18\x01 \x03(\v2-.google.protobuf.GeneratedCodeInfo.AnnotationR\n" + + "annotation\x1a\xeb\x01\n" + + "\n" + + "Annotation\x12\x16\n" + + "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x1f\n" + + "\vsource_file\x18\x02 \x01(\tR\n" + + "sourceFile\x12\x14\n" + + "\x05begin\x18\x03 \x01(\x05R\x05begin\x12\x10\n" + + "\x03end\x18\x04 \x01(\x05R\x03end\x12R\n" + + "\bsemantic\x18\x05 \x01(\x0e26.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\bsemantic\"(\n" + + "\bSemantic\x12\b\n" + + "\x04NONE\x10\x00\x12\a\n" + + "\x03SET\x10\x01\x12\t\n" + + "\x05ALIAS\x10\x02*\xa7\x02\n" + + "\aEdition\x12\x13\n" + + "\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" + + "\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" + + "\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" + + "\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" + + "\fEDITION_2023\x10\xe8\a\x12\x11\n" + + "\fEDITION_2024\x10\xe9\a\x12\x17\n" + + "\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" + + "\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" + + "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" + + "\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" + + "\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" + + "\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" + + "\x10SymbolVisibility\x12\x14\n" + + "\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" + + "\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" + + "\x11VISIBILITY_EXPORT\x10\x02B~\n" + + "\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection" var ( file_google_protobuf_descriptor_proto_rawDescOnce sync.Once @@ -5145,143 +5065,151 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34) var file_google_protobuf_descriptor_proto_goTypes = []any{ - (Edition)(0), // 0: google.protobuf.Edition - (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel - (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence - (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType - (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding - (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation - (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding - (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat - (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: 
google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 27: google.protobuf.FileOptions - (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption - (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet - (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults - (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault - (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport - (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation + (Edition)(0), // 0: google.protobuf.Edition + (SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility + (ExtensionRangeOptions_VerificationState)(0), // 2: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 3: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 4: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 5: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 6: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 7: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 8: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 9: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 10: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 11: 
google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 12: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 14: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 15: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 16: google.protobuf.FeatureSet.JsonFormat + (FeatureSet_EnforceNamingStyle)(0), // 17: google.protobuf.FeatureSet.EnforceNamingStyle + (FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility + (GeneratedCodeInfo_Annotation_Semantic)(0), // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 20: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 21: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 22: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 23: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 24: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 25: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 26: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 27: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 28: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 29: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 30: google.protobuf.FileOptions + (*MessageOptions)(nil), // 31: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 32: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 33: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 34: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 35: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 36: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 37: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 38: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 39: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 40: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 41: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 42: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 43: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 44: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 45: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 47: google.protobuf.FieldOptions.EditionDefault + (*FieldOptions_FeatureSupport)(nil), // 48: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 49: google.protobuf.UninterpretedOption.NamePart + (*FeatureSet_VisibilityFeature)(nil), // 50: google.protobuf.FeatureSet.VisibilityFeature + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 52: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 53: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 18, // 0: 
google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition - 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet - 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 43, // 26: 
google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault - 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 59: 
google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 77, // [77:77] is the sub-list for method output_type - 77, // [77:77] is the sub-list for method input_type - 77, // [77:77] is the sub-list for extension type_name - 77, // [77:77] is the sub-list for extension extendee - 0, // [0:77] is the sub-list for field type_name + 24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 1, // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility + 38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 39, // 19: 
google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 2, // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 4, // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 3, // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 1, // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility + 35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 5, // 33: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 6, // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 7, // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 8, // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 9, // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 38, // 52: 
google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle + 18, // 66: google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility + 51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 81, // [81:81] is the sub-list for method output_type + 81, // [81:81] is the sub-list for method input_type + 81, // [81:81] is the sub-list for extension type_name + 81, // [81:81] is the sub-list for extension extendee + 0, // [0:81] is the sub-list for 
field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5294,8 +5222,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), - NumEnums: 17, - NumMessages: 33, + NumEnums: 20, + NumMessages: 34, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index 28d24bad79..37e712b6b7 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -228,63 +228,29 @@ var ( var File_google_protobuf_go_features_proto protoreflect.FileDescriptor -var file_google_protobuf_go_features_proto_rawDesc = string([]byte{ - 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, - 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, - 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, - 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, - 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, - 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, - 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69, - 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f, - 0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2, - 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72, - 0x65, 
0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, - 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, - 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, - 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, - 0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, - 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a, - 0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49, - 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e, - 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44, - 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, - 0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, - 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52, - 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, - 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, - 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, - 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, - 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, -}) +const file_google_protobuf_go_features_proto_rawDesc = "" + + "\n" + + "!google/protobuf/go_features.proto\x12\x02pb\x1a google/protobuf/descriptor.proto\"\xab\x05\n" + + "\n" + + "GoFeatures\x12\xbe\x01\n" + + "\x1alegacy_unmarshal_json_enum\x18\x01 \x01(\bB\x80\x01\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\t\x12\x04true\x18\x84\a\xa2\x01\n" + + "\x12\x05false\x18\xe7\a\xb2\x01[\b\xe8\a\x10\xe8\a\x1aSThe legacy UnmarshalJSON API is deprecated and will be removed in a future edition.R\x17legacyUnmarshalJsonEnum\x12t\n" + + "\tapi_level\x18\x02 \x01(\x0e2\x17.pb.GoFeatures.APILevelB>\x88\x01\x01\x98\x01\x03\x98\x01\x01\xa2\x01\x1a\x12\x15API_LEVEL_UNSPECIFIED\x18\x84\a\xa2\x01\x0f\x12\n" + + "API_OPAQUE\x18\xe9\a\xb2\x01\x03\b\xe8\aR\bapiLevel\x12|\n" + + 
"\x11strip_enum_prefix\x18\x03 \x01(\x0e2\x1e.pb.GoFeatures.StripEnumPrefixB0\x88\x01\x01\x98\x01\x06\x98\x01\a\x98\x01\x01\xa2\x01\x1b\x12\x16STRIP_ENUM_PREFIX_KEEP\x18\x84\a\xb2\x01\x03\b\xe9\aR\x0fstripEnumPrefix\"S\n" + + "\bAPILevel\x12\x19\n" + + "\x15API_LEVEL_UNSPECIFIED\x10\x00\x12\f\n" + + "\bAPI_OPEN\x10\x01\x12\x0e\n" + + "\n" + + "API_HYBRID\x10\x02\x12\x0e\n" + + "\n" + + "API_OPAQUE\x10\x03\"\x92\x01\n" + + "\x0fStripEnumPrefix\x12!\n" + + "\x1dSTRIP_ENUM_PREFIX_UNSPECIFIED\x10\x00\x12\x1a\n" + + "\x16STRIP_ENUM_PREFIX_KEEP\x10\x01\x12#\n" + + "\x1fSTRIP_ENUM_PREFIX_GENERATE_BOTH\x10\x02\x12\x1b\n" + + "\x17STRIP_ENUM_PREFIX_STRIP\x10\x03:<\n" + + "\x02go\x12\x1b.google.protobuf.FeatureSet\x18\xea\a \x01(\v2\x0e.pb.GoFeaturesR\x02goB/Z-google.golang.org/protobuf/types/gofeaturespb" var ( file_google_protobuf_go_features_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 497da66e91..1ff0d1494d 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -412,23 +412,13 @@ func (x *Any) GetValue() []byte { var File_google_protobuf_any_proto protoreflect.FileDescriptor -var file_google_protobuf_any_proto_rawDesc = string([]byte{ - 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, - 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, - 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, - 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_any_proto_rawDesc = "" + + "\n" + + "\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n" + + "\x03Any\x12\x19\n" + + "\btype_url\x18\x01 \x01(\tR\atypeUrl\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05valueBv\n" + + "\x13com.google.protobufB\bAnyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_any_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 193880d181..ca2e7b38f4 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ 
b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -289,24 +289,13 @@ func (x *Duration) GetNanos() int32 { var File_google_protobuf_duration_proto protoreflect.FileDescriptor -var file_google_protobuf_duration_proto_rawDesc = string([]byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, - 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, - 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, - 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_duration_proto_rawDesc = "" + + "\n" + + "\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\":\n" + + "\bDuration\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" + + "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x83\x01\n" + + "\x13com.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_duration_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index a5b8657c4b..1d7ee3b476 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -86,20 +86,12 @@ func (*Empty) Descriptor() ([]byte, []int) { var File_google_protobuf_empty_proto protoreflect.FileDescriptor -var file_google_protobuf_empty_proto_rawDesc = string([]byte{ - 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, - 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, - 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, - 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_empty_proto_rawDesc = "" + + "\n" + + "\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\a\n" + + "\x05EmptyB}\n" + + "\x13com.google.protobufB\n" + + "EmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_empty_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index ecdd31ab53..30411b7283 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -672,55 +672,31 @@ func (x *ListValue) GetValues() []*Value { var File_google_protobuf_struct_proto protoreflect.FileDescriptor -var file_google_protobuf_struct_proto_rawDesc = string([]byte{ - 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, - 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, - 0x6f, 0x6f, 
0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, - 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c, - 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69, - 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, - 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, - 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09, - 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c, - 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, - 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, - 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -}) +const file_google_protobuf_struct_proto_rawDesc = "" + + "\n" + + "\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x98\x01\n" + + "\x06Struct\x12;\n" + + "\x06fields\x18\x01 \x03(\v2#.google.protobuf.Struct.FieldsEntryR\x06fields\x1aQ\n" + + "\vFieldsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12,\n" + + "\x05value\x18\x02 \x01(\v2\x16.google.protobuf.ValueR\x05value:\x028\x01\"\xb2\x02\n" + + "\x05Value\x12;\n" + + "\n" + + "null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12#\n" + + "\fnumber_value\x18\x02 \x01(\x01H\x00R\vnumberValue\x12#\n" + + "\fstring_value\x18\x03 \x01(\tH\x00R\vstringValue\x12\x1f\n" + + "\n" + + "bool_value\x18\x04 \x01(\bH\x00R\tboolValue\x12<\n" + + "\fstruct_value\x18\x05 \x01(\v2\x17.google.protobuf.StructH\x00R\vstructValue\x12;\n" + + "\n" + + "list_value\x18\x06 \x01(\v2\x1a.google.protobuf.ListValueH\x00R\tlistValueB\x06\n" + + "\x04kind\";\n" + + "\tListValue\x12.\n" + + "\x06values\x18\x01 \x03(\v2\x16.google.protobuf.ValueR\x06values*\x1b\n" + + "\tNullValue\x12\x0e\n" + + "\n" + + "NULL_VALUE\x10\x00B\x7f\n" + + 
"\x13com.google.protobufB\vStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_struct_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 00ac835c0b..06d584c14b 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -298,24 +298,13 @@ func (x *Timestamp) GetNanos() int32 { var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor -var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{ - 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, - 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, - 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, - 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, - 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, - 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_timestamp_proto_rawDesc = "" + + "\n" + + "\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" + + "\tTimestamp\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" + + "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" + + "\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_timestamp_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 5de5301063..b7c2d0607d 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -28,10 +28,17 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // -// Wrappers for primitive (non-message) types. 
These types are useful -// for embedding primitives in the `google.protobuf.Any` type and for places -// where we need to distinguish between the absence of a primitive -// typed field and its default value. +// Wrappers for primitive (non-message) types. These types were needed +// for legacy reasons and are not recommended for use in new APIs. +// +// Historically these wrappers were useful to have presence on proto3 primitive +// fields, but proto3 syntax has been updated to support the `optional` keyword. +// Using that keyword is now the strongly preferred way to add presence to +// proto3 primitive fields. +// +// A secondary usecase was to embed primitives in the `google.protobuf.Any` +// type: it is now recommended that you embed your value in your own wrapper +// message which can be specifically documented. // // These wrappers have no meaningful use within repeated fields as they lack // the ability to detect presence on individual elements. @@ -54,6 +61,9 @@ import ( // Wrapper message for `double`. // // The JSON representation for `DoubleValue` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type DoubleValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The double value. @@ -107,6 +117,9 @@ func (x *DoubleValue) GetValue() float64 { // Wrapper message for `float`. // // The JSON representation for `FloatValue` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type FloatValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The float value. @@ -160,6 +173,9 @@ func (x *FloatValue) GetValue() float32 { // Wrapper message for `int64`. // // The JSON representation for `Int64Value` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type Int64Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The int64 value. @@ -213,6 +229,9 @@ func (x *Int64Value) GetValue() int64 { // Wrapper message for `uint64`. // // The JSON representation for `UInt64Value` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type UInt64Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The uint64 value. @@ -266,6 +285,9 @@ func (x *UInt64Value) GetValue() uint64 { // Wrapper message for `int32`. // // The JSON representation for `Int32Value` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type Int32Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The int32 value. @@ -319,6 +341,9 @@ func (x *Int32Value) GetValue() int32 { // Wrapper message for `uint32`. // // The JSON representation for `UInt32Value` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type UInt32Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The uint32 value. @@ -372,6 +397,9 @@ func (x *UInt32Value) GetValue() uint32 { // Wrapper message for `bool`. // // The JSON representation for `BoolValue` is JSON `true` and `false`. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type BoolValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The bool value. 
@@ -425,6 +453,9 @@ func (x *BoolValue) GetValue() bool { // Wrapper message for `string`. // // The JSON representation for `StringValue` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type StringValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The string value. @@ -478,6 +509,9 @@ func (x *StringValue) GetValue() string { // Wrapper message for `bytes`. // // The JSON representation for `BytesValue` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type BytesValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The bytes value. @@ -530,41 +564,32 @@ func (x *BytesValue) GetValue() []byte { var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor -var file_google_protobuf_wrappers_proto_rawDesc = string([]byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, - 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, - 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09, - 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 
0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, - 0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_wrappers_proto_rawDesc = "" + + "\n" + + "\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"#\n" + + "\vDoubleValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\x01R\x05value\"\"\n" + + "\n" + + "FloatValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\x02R\x05value\"\"\n" + + "\n" + + "Int64Value\x12\x14\n" + + "\x05value\x18\x01 \x01(\x03R\x05value\"#\n" + + "\vUInt64Value\x12\x14\n" + + "\x05value\x18\x01 \x01(\x04R\x05value\"\"\n" + + "\n" + + "Int32Value\x12\x14\n" + + "\x05value\x18\x01 \x01(\x05R\x05value\"#\n" + + "\vUInt32Value\x12\x14\n" + + "\x05value\x18\x01 \x01(\rR\x05value\"!\n" + + "\tBoolValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\bR\x05value\"#\n" + + "\vStringValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\tR\x05value\"\"\n" + + "\n" + + "BytesValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\fR\x05valueB\x83\x01\n" + + "\x13com.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_wrappers_proto_rawDescOnce sync.Once diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/README.md b/vendor/gopkg.in/evanphx/json-patch.v4/README.md index 28e3516937..86fefd5bf7 100644 --- a/vendor/gopkg.in/evanphx/json-patch.v4/README.md +++ b/vendor/gopkg.in/evanphx/json-patch.v4/README.md @@ -4,7 +4,7 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). [![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Build Status](https://github.com/evanphx/json-patch/actions/workflows/go.yml/badge.svg)](https://github.com/evanphx/json-patch/actions/workflows/go.yml) [![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) # Get It! @@ -14,9 +14,7 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ie go get -u github.com/evanphx/json-patch/v5 ``` -**Stable Versions**: -* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` -* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` +If you need version 4, use `go get -u gopkg.in/evanphx/json-patch.v4` (previous versions below `v3` are unavailable) @@ -314,4 +312,4 @@ go test -cover ./... ``` Builds for pull requests are tested automatically -using [TravisCI](https://travis-ci.org/evanphx/json-patch). +using [GitHub Actions](https://github.com/evanphx/json-patch/actions/workflows/go.yml). 
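The protobuf hunks above replace the generated `rawDesc` byte-slice literals with string constants, while the unchanged context still declares a `sync.Once` (e.g. `file_google_protobuf_empty_proto_rawDescOnce`), so the descriptor bytes are only materialized on first use. Below is a minimal, self-contained sketch of that lazy-conversion pattern; the identifiers are illustrative, not the exact generated names.

```go
package main

import (
	"fmt"
	"sync"
)

// rawDesc stands in for a generated descriptor constant; the real values
// are the long escaped strings shown in the hunks above.
const rawDesc = "\n\x1bgoogle/protobuf/empty.proto"

var (
	rawDescOnce sync.Once
	rawDescData []byte
)

// descriptorBytes converts the constant to a byte slice exactly once,
// keeping the package-level data as a read-only string until needed.
func descriptorBytes() []byte {
	rawDescOnce.Do(func() {
		rawDescData = []byte(rawDesc)
	})
	return rawDescData
}

func main() {
	fmt.Println(len(descriptorBytes()))
}
```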
diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/patch.go b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go index dc2b7e51e6..95136681ba 100644 --- a/vendor/gopkg.in/evanphx/json-patch.v4/patch.go +++ b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go @@ -3,11 +3,10 @@ package jsonpatch import ( "bytes" "encoding/json" + "errors" "fmt" "strconv" "strings" - - "github.com/pkg/errors" ) const ( @@ -277,7 +276,7 @@ func (o Operation) Path() (string, error) { return op, nil } - return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") + return "unknown", fmt.Errorf("operation missing path field: %w", ErrMissing) } // From reads the "from" field of the Operation. @@ -294,7 +293,7 @@ func (o Operation) From() (string, error) { return op, nil } - return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") + return "unknown", fmt.Errorf("operation, missing from field: %w", ErrMissing) } func (o Operation) value() *lazyNode { @@ -319,7 +318,7 @@ func (o Operation) ValueInterface() (interface{}, error) { return v, nil } - return nil, errors.Wrapf(ErrMissing, "operation, missing value field") + return nil, fmt.Errorf("operation, missing value field: %w", ErrMissing) } func isArray(buf []byte) bool { @@ -359,7 +358,7 @@ func findObject(pd *container, path string) (container, string) { next, ok := doc.get(decodePatchKey(part)) - if next == nil || ok != nil { + if next == nil || ok != nil || next.raw == nil { return nil, "" } @@ -398,7 +397,7 @@ func (d *partialDoc) get(key string) (*lazyNode, error) { func (d *partialDoc) remove(key string) error { _, ok := (*d)[key] if !ok { - return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + return fmt.Errorf("Unable to remove nonexistent key: %s: %w", key, ErrMissing) } delete(*d, key) @@ -415,10 +414,10 @@ func (d *partialArray) set(key string, val *lazyNode) error { if idx < 0 { if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(*d) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(*d) } @@ -435,7 +434,7 @@ func (d *partialArray) add(key string, val *lazyNode) error { idx, err := strconv.Atoi(key) if err != nil { - return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + return fmt.Errorf("value was not a proper array index: '%s': %w", key, err) } sz := len(*d) + 1 @@ -445,15 +444,15 @@ func (d *partialArray) add(key string, val *lazyNode) error { cur := *d if idx >= len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < 0 { if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(ary) } @@ -475,16 +474,16 @@ func (d *partialArray) get(key string) (*lazyNode, error) { if idx < 0 { if !SupportNegativeIndices { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access 
invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(*d) } if idx >= len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } return (*d)[idx], nil @@ -499,15 +498,15 @@ func (d *partialArray) remove(key string) error { cur := *d if idx >= len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < 0 { if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(cur) } @@ -525,18 +524,18 @@ func (d *partialArray) remove(key string) error { func (p Patch) add(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "add operation failed to decode path") + return fmt.Errorf("add operation failed to decode path: %w", ErrMissing) } con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + return fmt.Errorf("add operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing) } err = con.add(key, op.value()) if err != nil { - return errors.Wrapf(err, "error in add for path: '%s'", path) + return fmt.Errorf("error in add for path: '%s': %w", path, err) } return nil @@ -545,18 +544,18 @@ func (p Patch) add(doc *container, op Operation) error { func (p Patch) remove(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + return fmt.Errorf("remove operation failed to decode path: %w", ErrMissing) } con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + return fmt.Errorf("remove operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing) } err = con.remove(key) if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) + return fmt.Errorf("error in remove for path: '%s': %w", path, err) } return nil @@ -565,7 +564,7 @@ func (p Patch) remove(doc *container, op Operation) error { func (p Patch) replace(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(err, "replace operation failed to decode path") + return fmt.Errorf("replace operation failed to decode path: %w", err) } if path == "" { @@ -574,7 +573,7 @@ func (p Patch) replace(doc *container, op Operation) error { if val.which == eRaw { if !val.tryDoc() { if !val.tryAry() { - return errors.Wrapf(err, "replace operation value must be object or array") + return fmt.Errorf("replace operation value must be object or array: %w", err) } } } @@ -585,7 +584,7 @@ func (p Patch) replace(doc *container, op Operation) error { case eDoc: *doc = &val.doc case eRaw: - return errors.Wrapf(err, "replace operation hit impossible case") + 
return fmt.Errorf("replace operation hit impossible case: %w", err) } return nil @@ -594,17 +593,17 @@ func (p Patch) replace(doc *container, op Operation) error { con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + return fmt.Errorf("replace operation does not apply: doc is missing path: %s: %w", path, ErrMissing) } _, ok := con.get(key) if ok != nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + return fmt.Errorf("replace operation does not apply: doc is missing key: %s: %w", path, ErrMissing) } err = con.set(key, op.value()) if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) + return fmt.Errorf("error in remove for path: '%s': %w", path, err) } return nil @@ -613,39 +612,39 @@ func (p Patch) replace(doc *container, op Operation) error { func (p Patch) move(doc *container, op Operation) error { from, err := op.From() if err != nil { - return errors.Wrapf(err, "move operation failed to decode from") + return fmt.Errorf("move operation failed to decode from: %w", err) } con, key := findObject(doc, from) if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + return fmt.Errorf("move operation does not apply: doc is missing from path: %s: %w", from, ErrMissing) } val, err := con.get(key) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) + return fmt.Errorf("error in move for path: '%s': %w", key, err) } err = con.remove(key) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) + return fmt.Errorf("error in move for path: '%s': %w", key, err) } path, err := op.Path() if err != nil { - return errors.Wrapf(err, "move operation failed to decode path") + return fmt.Errorf("move operation failed to decode path: %w", err) } con, key = findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + return fmt.Errorf("move operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing) } err = con.add(key, val) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", path) + return fmt.Errorf("error in move for path: '%s': %w", path, err) } return nil @@ -654,7 +653,7 @@ func (p Patch) move(doc *container, op Operation) error { func (p Patch) test(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(err, "test operation failed to decode path") + return fmt.Errorf("test operation failed to decode path: %w", err) } if path == "" { @@ -673,67 +672,67 @@ func (p Patch) test(doc *container, op Operation) error { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + return fmt.Errorf("test operation does not apply: is missing path: %s: %w", path, ErrMissing) } val, err := con.get(key) if err != nil { - return errors.Wrapf(err, "error in test for path: '%s'", path) + return fmt.Errorf("error in test for path: '%s': %w", path, err) } if val == nil { - if op.value().raw == nil { + if op.value() == nil || op.value().raw == nil { return nil } - return errors.Wrapf(ErrTestFailed, "testing 
value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } else if op.value() == nil { - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } if val.equal(op.value()) { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { from, err := op.From() if err != nil { - return errors.Wrapf(err, "copy operation failed to decode from") + return fmt.Errorf("copy operation failed to decode from: %w", err) } con, key := findObject(doc, from) if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + return fmt.Errorf("copy operation does not apply: doc is missing from path: %s: %w", from, ErrMissing) } val, err := con.get(key) if err != nil { - return errors.Wrapf(err, "error in copy for from: '%s'", from) + return fmt.Errorf("error in copy for from: '%s': %w", from, err) } path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + return fmt.Errorf("copy operation failed to decode path: %w", ErrMissing) } con, key = findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + return fmt.Errorf("copy operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing) } valCopy, sz, err := deepCopy(val) if err != nil { - return errors.Wrapf(err, "error while performing deep copy") + return fmt.Errorf("error while performing deep copy: %w", err) } (*accumulatedCopySize) += int64(sz) @@ -743,7 +742,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) er err = con.add(key, valCopy) if err != nil { - return errors.Wrapf(err, "error while adding value during copy") + return fmt.Errorf("error while adding value during copy: %w", err) } return nil diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 5f983b6b63..e07c04e625 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -596,16 +596,7 @@ func newInformer(clientState Store, options InformerOptions) Controller { // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. 
- var fifo Queue - if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) { - fifo = NewRealFIFO(MetaNamespaceKeyFunc, clientState, options.Transform) - } else { - fifo = NewDeltaFIFOWithOptions(DeltaFIFOOptions{ - KnownObjects: clientState, - EmitDeltaTypeReplaced: true, - Transformer: options.Transform, - }) - } + fifo := newQueueFIFO(clientState, options.Transform) cfg := &Config{ Queue: fifo, @@ -623,3 +614,15 @@ func newInformer(clientState Store, options InformerOptions) Controller { } return New(cfg) } + +func newQueueFIFO(clientState Store, transform TransformFunc) Queue { + if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) { + return NewRealFIFO(MetaNamespaceKeyFunc, clientState, transform) + } else { + return NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KnownObjects: clientState, + EmitDeltaTypeReplaced: true, + Transformer: transform, + }) + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 9d9e238ccc..a0d7a834aa 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -270,7 +270,8 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO { } var ( - _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue + _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue + _ = TransformingStore(&DeltaFIFO{}) // DeltaFIFO implements TransformingStore to allow memory optimizations ) var ( diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index ee9be77278..6fd43375f6 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -80,7 +80,7 @@ type ReflectorStore interface { // TransformingStore is an optional interface that can be implemented by the provided store. // If implemented on the provided store reflector will use the same transformer in its internal stores. type TransformingStore interface { - Store + ReflectorStore Transformer() TransformFunc } @@ -726,9 +726,11 @@ func (r *Reflector) watchList(ctx context.Context) (watch.Interface, error) { return false } + var transformer TransformFunc storeOpts := []StoreOption{} if tr, ok := r.store.(TransformingStore); ok && tr.Transformer() != nil { - storeOpts = append(storeOpts, WithTransformer(tr.Transformer())) + transformer = tr.Transformer() + storeOpts = append(storeOpts, WithTransformer(transformer)) } initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name}) @@ -788,7 +790,7 @@ func (r *Reflector) watchList(ctx context.Context) (watch.Interface, error) { // we utilize the temporaryStore to ensure independence from the current store implementation. // as of today, the store is implemented as a queue and will be drained by the higher-level // component as soon as it finishes replacing the content. 
- checkWatchListDataConsistencyIfRequested(ctx, r.name, resourceVersion, r.listerWatcher.ListWithContext, temporaryStore.List) + checkWatchListDataConsistencyIfRequested(ctx, r.name, resourceVersion, r.listerWatcher.ListWithContext, transformer, temporaryStore.List) if err := r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { return nil, fmt.Errorf("unable to sync watch-list result: %w", err) diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go index a7e0d9c436..4119c78a6c 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go @@ -33,11 +33,11 @@ import ( // // Note that this function will panic when data inconsistency is detected. // This is intentional because we want to catch it in the CI. -func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) { +func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], listItemTransformFunc func(interface{}) (interface{}, error), retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) { if !consistencydetector.IsDataConsistencyDetectionForWatchListEnabled() { return } // for informers we pass an empty ListOptions because // listFn might be wrapped for filtering during informer construction. - consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, metav1.ListOptions{}, retrieveItemsFn) + consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, listItemTransformFunc, metav1.ListOptions{}, retrieveItemsFn) } diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 99e5fcd180..1c12aa2d64 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -539,16 +539,7 @@ func (s *sharedIndexInformer) RunWithContext(ctx context.Context) { s.startedLock.Lock() defer s.startedLock.Unlock() - var fifo Queue - if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) { - fifo = NewRealFIFO(MetaNamespaceKeyFunc, s.indexer, s.transform) - } else { - fifo = NewDeltaFIFOWithOptions(DeltaFIFOOptions{ - KnownObjects: s.indexer, - EmitDeltaTypeReplaced: true, - Transformer: s.transform, - }) - } + fifo := newQueueFIFO(s.indexer, s.transform) cfg := &Config{ Queue: fifo, diff --git a/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go b/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go index ef322bea85..b907410dc0 100644 --- a/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go @@ -61,7 +61,8 @@ type RealFIFO struct { } var ( - _ = Queue(&RealFIFO{}) // RealFIFO is a Queue + _ = Queue(&RealFIFO{}) // RealFIFO is a Queue + _ = TransformingStore(&RealFIFO{}) // RealFIFO implements TransformingStore to allow memory optimizations ) // Close the queue. 
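The DeltaFIFO and RealFIFO hunks above extend the package-level assertions so both types must also satisfy `TransformingStore`. The sketch below shows that compile-time interface-check idiom with stand-in types (the interfaces and the `fifo` struct here are illustrative, not the client-go definitions).

```go
package main

import "fmt"

// Store and TransformingStore are stand-ins for the client-go interfaces
// referenced in the hunks above.
type Store interface {
	List() []any
}

type TransformFunc func(any) (any, error)

type TransformingStore interface {
	Store
	Transformer() TransformFunc
}

// fifo is a hypothetical queue implementation.
type fifo struct {
	transform TransformFunc
}

func (f *fifo) List() []any                { return nil }
func (f *fifo) Transformer() TransformFunc { return f.transform }

// Compile-time checks: if fifo ever stops satisfying either interface,
// the package no longer builds.
var (
	_ Store             = (*fifo)(nil)
	_ TransformingStore = (*fifo)(nil)
)

func main() {
	fmt.Println("fifo satisfies TransformingStore")
}
```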
diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go index 5d2054155c..79a748b74d 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -77,6 +77,9 @@ func (ll *LeaseLock) Update(ctx context.Context, ler LeaderElectionRecord) error ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler) if ll.Labels != nil { + if ll.lease.Labels == nil { + ll.lease.Labels = map[string]string{} + } // Only overwrite the labels that are specifically set for k, v := range ll.Labels { ll.lease.Labels[k] = v diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 1220461264..48c78b595e 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -75,13 +75,15 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro CommonName: cfg.CommonName, Organization: cfg.Organization, }, - DNSNames: []string{cfg.CommonName}, NotBefore: notBefore, NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, IsCA: true, } + if len(cfg.CommonName) > 0 { + tmpl.DNSNames = []string{cfg.CommonName} + } certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) if err != nil { diff --git a/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go index 06f172d82b..72c0124a0f 100644 --- a/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go +++ b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go @@ -45,16 +45,28 @@ func IsDataConsistencyDetectionForWatchListEnabled() bool { return dataConsistencyDetectionForWatchListEnabled } +// SetDataConsistencyDetectionForWatchListEnabledForTest allows to enable/disable data consistency detection for testing purposes. +// It returns a function that restores the original value. +func SetDataConsistencyDetectionForWatchListEnabledForTest(enabled bool) func() { + original := dataConsistencyDetectionForWatchListEnabled + dataConsistencyDetectionForWatchListEnabled = enabled + return func() { + dataConsistencyDetectionForWatchListEnabled = original + } +} + type RetrieveItemsFunc[U any] func() []U type ListFunc[T runtime.Object] func(ctx context.Context, options metav1.ListOptions) (T, error) +type TransformFunc func(interface{}) (interface{}, error) + // CheckDataConsistency exists solely for testing purposes. // we cannot use checkWatchListDataConsistencyIfRequested because // it is guarded by an environmental variable. // we cannot manipulate the environmental variable because // it will affect other tests in this package. 
-func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) { +func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listItemTransformFunc TransformFunc, listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) { if !canFormAdditionalListCall(lastSyncedResourceVersion, listOptions) { klog.V(4).Infof("data consistency check for %s is enabled but the parameters (RV, ListOptions) doesn't allow for creating a valid LIST request. Skipping the data consistency check.", identity) return @@ -84,6 +96,15 @@ func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity if err != nil { panic(err) // this should never happen } + if listItemTransformFunc != nil { + for i := range rawListItems { + obj, err := listItemTransformFunc(rawListItems[i]) + if err != nil { + panic(err) + } + rawListItems[i] = obj.(runtime.Object) + } + } listItems := toMetaObjectSliceOrDie(rawListItems) sort.Sort(byUID(listItems)) diff --git a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go index 7cb7795bec..e5d508055d 100644 --- a/vendor/k8s.io/utils/net/multi_listen.go +++ b/vendor/k8s.io/utils/net/multi_listen.go @@ -21,6 +21,7 @@ import ( "fmt" "net" "sync" + "sync/atomic" ) // connErrPair pairs conn and error which is returned by accept on sub-listeners. @@ -38,6 +39,7 @@ type multiListener struct { connCh chan connErrPair // stopCh communicates from parent to child listeners. stopCh chan struct{} + closed atomic.Bool } // compile time check to ensure *multiListener implements net.Listener @@ -150,10 +152,8 @@ func (ml *multiListener) Accept() (net.Conn, error) { // the go-routines to exit. func (ml *multiListener) Close() error { // Make sure this can be called repeatedly without explosions. - select { - case <-ml.stopCh: + if !ml.closed.CompareAndSwap(false, true) { return fmt.Errorf("use of closed network connection") - default: } // Tell all sub-listeners to stop. 
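The multi_listen.go hunk above replaces the non-blocking select on `stopCh` with an `atomic.Bool`, so only the first `Close` proceeds and later calls return the "use of closed network connection" error without racing on the channel. A standalone sketch of that compare-and-swap guard, using an illustrative type rather than the real `multiListener`:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// closer models just the shutdown-relevant fields of the listener above.
type closer struct {
	closed atomic.Bool
	stopCh chan struct{}
}

// Close is safe to call repeatedly: only the first caller wins the
// CompareAndSwap and performs the shutdown work.
func (c *closer) Close() error {
	if !c.closed.CompareAndSwap(false, true) {
		return errors.New("use of closed network connection")
	}
	close(c.stopCh) // tell sub-listeners to stop
	return nil
}

func main() {
	c := &closer{stopCh: make(chan struct{})}
	fmt.Println(c.Close()) // <nil>
	fmt.Println(c.Close()) // use of closed network connection
}
```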
diff --git a/vendor/modules.txt b/vendor/modules.txt index 050891ed18..922becb56c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -16,27 +16,63 @@ github.com/cespare/xxhash/v2 # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew -# github.com/emicklei/go-restful/v3 v3.12.2 +# github.com/emicklei/go-restful/v3 v3.13.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log # github.com/fxamacker/cbor/v2 v2.9.0 ## explicit; go 1.20 github.com/fxamacker/cbor/v2 -# github.com/go-logr/logr v1.4.2 +# github.com/go-logr/logr v1.4.3 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr -# github.com/go-openapi/jsonpointer v0.21.0 -## explicit; go 1.20 +# github.com/go-openapi/jsonpointer v0.22.1 +## explicit; go 1.24.0 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.20.2 -## explicit; go 1.13 +# github.com/go-openapi/jsonreference v0.21.2 +## explicit; go 1.24.0 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal -# github.com/go-openapi/swag v0.23.0 -## explicit; go 1.20 +# github.com/go-openapi/swag v0.25.1 +## explicit; go 1.24.0 github.com/go-openapi/swag +# github.com/go-openapi/swag/cmdutils v0.25.1 +## explicit; go 1.24.0 +github.com/go-openapi/swag/cmdutils +# github.com/go-openapi/swag/conv v0.25.1 +## explicit; go 1.24.0 +github.com/go-openapi/swag/conv +# github.com/go-openapi/swag/fileutils v0.25.1 +## explicit; go 1.24.0 +github.com/go-openapi/swag/fileutils +# github.com/go-openapi/swag/jsonname v0.25.1 +## explicit; go 1.24.0 +github.com/go-openapi/swag/jsonname +# github.com/go-openapi/swag/jsonutils v0.25.1 +## explicit; go 1.24.0 +github.com/go-openapi/swag/jsonutils +github.com/go-openapi/swag/jsonutils/adapters +github.com/go-openapi/swag/jsonutils/adapters/ifaces +github.com/go-openapi/swag/jsonutils/adapters/stdlib/json +# github.com/go-openapi/swag/loading v0.25.1 +## explicit; go 1.24.0 +github.com/go-openapi/swag/loading +# github.com/go-openapi/swag/mangling v0.25.1 +## explicit; go 1.24.0 +github.com/go-openapi/swag/mangling +# github.com/go-openapi/swag/netutils v0.25.1 +## explicit; go 1.24.0 +github.com/go-openapi/swag/netutils +# github.com/go-openapi/swag/stringutils v0.25.1 +## explicit; go 1.24.0 +github.com/go-openapi/swag/stringutils +# github.com/go-openapi/swag/typeutils v0.25.1 +## explicit; go 1.24.0 +github.com/go-openapi/swag/typeutils +# github.com/go-openapi/swag/yamlutils v0.25.1 +## explicit; go 1.24.0 +github.com/go-openapi/swag/yamlutils # github.com/go-task/slim-sprig/v3 v3.0.0 ## explicit; go 1.20 github.com/go-task/slim-sprig/v3 @@ -91,20 +127,12 @@ github.com/google/uuid # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap -# github.com/josharian/intern v1.0.0 -## explicit; go 1.5 -github.com/josharian/intern # github.com/jpillora/backoff v1.0.0 ## explicit; go 1.13 github.com/jpillora/backoff # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/mailru/easyjson v0.7.7 -## explicit; go 1.12 -github.com/mailru/easyjson/buffer -github.com/mailru/easyjson/jlexer -github.com/mailru/easyjson/jwriter # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent @@ -117,7 +145,7 @@ github.com/munnerz/goautoneg # github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f ## explicit github.com/mwitkow/go-conntrack -# 
github.com/onsi/ginkgo/v2 v2.21.0 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 +# github.com/onsi/ginkgo/v2 v2.22.0 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 ## explicit; go 1.22.0 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -139,13 +167,14 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.35.1 +# github.com/onsi/gomega v1.36.1 ## explicit; go 1.22 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal github.com/onsi/gomega/internal/gutil github.com/onsi/gomega/matchers +github.com/onsi/gomega/matchers/internal/miter github.com/onsi/gomega/matchers/support/goraph/bipartitegraph github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node @@ -273,6 +302,17 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib +# github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.88.1 +## explicit; go 1.24.0 +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1 +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1 +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1 +# github.com/prometheus-operator/prometheus-operator/pkg/client v0.88.1 +## explicit; go 1.24.0 +github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1 +github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme +github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1 # github.com/prometheus/client_golang v1.22.0 ## explicit; go 1.22 github.com/prometheus/client_golang/api @@ -327,7 +367,7 @@ go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry -# go.yaml.in/yaml/v2 v2.4.2 +# go.yaml.in/yaml/v2 v2.4.3 ## explicit; go 1.15 go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 @@ -360,8 +400,8 @@ golang.org/x/net/idna golang.org/x/net/internal/httpcommon golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.27.0 -## explicit; go 1.23.0 +# golang.org/x/oauth2 v0.31.0 +## explicit; go 1.24.0 golang.org/x/oauth2 golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/internal @@ -395,8 +435,8 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.9.0 -## explicit; go 1.18 +# golang.org/x/time v0.13.0 +## explicit; go 1.24.0 golang.org/x/time/rate # golang.org/x/tools v0.38.0 ## explicit; go 1.24.0 @@ -409,8 +449,8 @@ google.golang.org/genproto/googleapis/api/expr/v1alpha1 # google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/protobuf v1.36.5 -## explicit; go 1.21 +# google.golang.org/protobuf v1.36.10 +## explicit; go 1.23 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -452,7 +492,7 @@ google.golang.org/protobuf/types/known/emptypb 
google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb -# gopkg.in/evanphx/json-patch.v4 v4.12.0 +# gopkg.in/evanphx/json-patch.v4 v4.13.0 ## explicit gopkg.in/evanphx/json-patch.v4 # gopkg.in/fsnotify.v1 v1.4.7 @@ -467,7 +507,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.34.1 +# k8s.io/api v0.34.3 ## explicit; go 1.24.0 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1alpha1 @@ -527,7 +567,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.34.1 +# k8s.io/apiextensions-apiserver v0.34.3 ## explicit; go 1.24.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -535,7 +575,7 @@ k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 -# k8s.io/apimachinery v0.34.1 +# k8s.io/apimachinery v0.34.3 ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -596,10 +636,10 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.34.1 +# k8s.io/apiserver v0.34.3 ## explicit; go 1.24.0 k8s.io/apiserver/pkg/authentication/user -# k8s.io/client-go v0.34.1 +# k8s.io/client-go v0.34.3 ## explicit; go 1.24.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -937,7 +977,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.34.1 +# k8s.io/component-base v0.34.3 ## explicit; go 1.24.0 k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry @@ -959,8 +999,8 @@ k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1 k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1 -# k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b -## explicit; go 1.23 +# k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 +## explicit; go 1.23.0 k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/handler3 @@ -970,7 +1010,7 @@ k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 +# k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -980,10 +1020,11 @@ k8s.io/utils/lru k8s.io/utils/net k8s.io/utils/ptr k8s.io/utils/trace -# sigs.k8s.io/controller-runtime v0.12.1 -## explicit; go 1.17 +# sigs.k8s.io/controller-runtime v0.22.3 +## explicit; go 1.24.0 +sigs.k8s.io/controller-runtime/pkg/conversion sigs.k8s.io/controller-runtime/pkg/scheme -# sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 +# sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 ## explicit; go 1.23 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json diff --git 
a/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go b/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go new file mode 100644 index 0000000000..da32ab48e4 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package conversion provides interface definitions that an API Type needs to +implement for it to be supported by the generic conversion webhook handler +defined under pkg/webhook/conversion. +*/ +package conversion + +import "k8s.io/apimachinery/pkg/runtime" + +// Convertible defines capability of a type to convertible i.e. it can be converted to/from a hub type. +type Convertible interface { + runtime.Object + ConvertTo(dst Hub) error + ConvertFrom(src Hub) error +} + +// Hub marks that a given type is the hub type for conversion. This means that +// all conversions will first convert to the hub type, then convert from the hub +// type to the destination type. All types besides the hub type should implement +// Convertible. +type Hub interface { + runtime.Object + Hub() +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go index 9dc93a9b21..55ebe21773 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go @@ -21,37 +21,36 @@ limitations under the License. // Each API group should define a utility function // called AddToScheme for adding its types to a Scheme: // -// // in package myapigroupv1... -// var ( -// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"} -// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} -// AddToScheme = SchemeBuilder.AddToScheme -// ) +// // in package myapigroupv1... +// var ( +// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"} +// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +// AddToScheme = SchemeBuilder.AddToScheme +// ) // -// func init() { -// SchemeBuilder.Register(&MyType{}, &MyTypeList) -// } -// var ( -// scheme *runtime.Scheme = runtime.NewScheme() -// ) +// func init() { +// SchemeBuilder.Register(&MyType{}, &MyTypeList) +// } +// var ( +// scheme *runtime.Scheme = runtime.NewScheme() +// ) // // This also true of the built-in Kubernetes types. Then, in the entrypoint for // your manager, assemble the scheme containing exactly the types you need, // panicing if scheme registration failed. For instance, if our controller needs // types from the core/v1 API group (e.g. Pod), plus types from my.api.group/v1: // -// func init() { -// utilruntime.Must(myapigroupv1.AddToScheme(scheme)) -// utilruntime.Must(kubernetesscheme.AddToScheme(scheme)) -// } -// -// func main() { -// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{ -// Scheme: scheme, -// }) -// // ... 
-// } +// func init() { +// utilruntime.Must(myapigroupv1.AddToScheme(scheme)) +// utilruntime.Must(kubernetesscheme.AddToScheme(scheme)) +// } // +// func main() { +// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{ +// Scheme: scheme, +// }) +// // ... +// } package scheme import ( diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go index d538ac119b..3fe528bbf3 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go @@ -52,8 +52,8 @@ import ( // - bool, for JSON booleans // - float64, for JSON numbers // - string, for JSON strings -// - []interface{}, for JSON arrays -// - map[string]interface{}, for JSON objects +// - []any, for JSON arrays +// - map[string]any, for JSON objects // - nil for JSON null // // To unmarshal a JSON array into a slice, Unmarshal resets the slice length @@ -117,9 +117,6 @@ func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error { // The input can be assumed to be a valid encoding of // a JSON value. UnmarshalJSON must copy the JSON data // if it wishes to retain the data after returning. -// -// By convention, to approximate the behavior of [Unmarshal] itself, -// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. type Unmarshaler interface { UnmarshalJSON([]byte) error } @@ -132,7 +129,7 @@ type UnmarshalTypeError struct { Type reflect.Type // type of Go value it could not be assigned to Offset int64 // error occurred after reading Offset bytes Struct string // name of the struct type containing the field - Field string // the full path from root node to the field + Field string // the full path from root node to the field, include embedded struct } func (e *UnmarshalTypeError) Error() string { @@ -281,7 +278,11 @@ func (d *decodeState) addErrorContext(err error) error { switch err := err.(type) { case *UnmarshalTypeError: err.Struct = d.errorContext.Struct.Name() - err.Field = strings.Join(d.errorContext.FieldStack, ".") + fieldStack := d.errorContext.FieldStack + if err.Field != "" { + fieldStack = append(fieldStack, err.Field) + } + err.Field = strings.Join(fieldStack, ".") } } return err @@ -492,9 +493,9 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm } // Prevent infinite loop if v is an interface pointing to its own address: - // var v interface{} + // var v any // v = &v - if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem().Equal(v) { v = v.Elem() break } @@ -784,7 +785,10 @@ func (d *decodeState) object(v reflect.Value) error { } subv = v destring = f.quoted - for _, i := range f.index { + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + for i, ind := range f.index { if subv.Kind() == reflect.Pointer { if subv.IsNil() { // If a struct embeds a pointer to an unexported type, @@ -804,13 +808,16 @@ func (d *decodeState) object(v reflect.Value) error { } subv = subv.Elem() } - subv = subv.Field(i) - } - if d.errorContext == nil { - d.errorContext = new(errorContext) + if i < len(f.index)-1 { + d.errorContext.FieldStack = append( + d.errorContext.FieldStack, + subv.Type().Field(ind).Name, + ) + } + subv = subv.Field(ind) } - d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) d.errorContext.Struct = t + d.errorContext.FieldStack = 
append(d.errorContext.FieldStack, f.name) d.appendStrictFieldStackKey(f.name) } else if d.disallowUnknownFields { d.saveStrictError(d.newFieldError(unknownStrictErrType, string(key))) @@ -1118,7 +1125,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool // in an empty interface. They are not strictly necessary, // but they avoid the weight of reflection in this common case. -// valueInterface is like value but returns interface{} +// valueInterface is like value but returns any. func (d *decodeState) valueInterface() (val any) { switch d.opcode { default: @@ -1135,7 +1142,7 @@ func (d *decodeState) valueInterface() (val any) { return } -// arrayInterface is like array but returns []interface{}. +// arrayInterface is like array but returns []any. func (d *decodeState) arrayInterface() []any { origStrictFieldStackLen := len(d.strictFieldStack) defer func() { @@ -1170,7 +1177,7 @@ func (d *decodeState) arrayInterface() []any { return v } -// objectInterface is like object but returns map[string]interface{}. +// objectInterface is like object but returns map[string]any. func (d *decodeState) objectInterface() map[string]any { origStrictFieldStackLen := len(d.strictFieldStack) defer func() { diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go index eb73bff58b..4e3a1a2f10 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go @@ -71,8 +71,8 @@ import ( // // The "omitempty" option specifies that the field should be omitted // from the encoding if the field has an empty value, defined as -// false, 0, a nil pointer, a nil interface value, and any empty array, -// slice, map, or string. +// false, 0, a nil pointer, a nil interface value, and any array, +// slice, map, or string of length zero. // // As a special case, if the field tag is "-", the field is always omitted. // Note that a field with name "-" can still be generated using the tag "-,". @@ -98,6 +98,17 @@ import ( // // Field appears in JSON as key "-". // Field int `json:"-,"` // +// The "omitzero" option specifies that the field should be omitted +// from the encoding if the field has a zero value, according to rules: +// +// 1) If the field type has an "IsZero() bool" method, that will be used to +// determine whether the value is zero. +// +// 2) Otherwise, the value is zero if it is the zero value for its type. +// +// If both "omitempty" and "omitzero" are specified, the field will be omitted +// if the value is either empty or zero (or both). +// // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. This extra level of encoding is sometimes used @@ -690,7 +701,8 @@ FieldLoop: fv = fv.Field(i) } - if f.omitEmpty && isEmptyValue(fv) { + if (f.omitEmpty && isEmptyValue(fv)) || + (f.omitZero && (f.isZero == nil && fv.IsZero() || (f.isZero != nil && f.isZero(fv)))) { continue } e.WriteByte(next) @@ -808,7 +820,7 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { // Here we use a struct to memorize the pointer to the first element of the slice // and its length. 
ptr := struct { - ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe + ptr any // always an unsafe.Pointer, but avoids a dependency on package unsafe len int }{v.UnsafePointer(), v.Len()} if _, ok := e.ptrSeen[ptr]; ok { @@ -1039,11 +1051,19 @@ type field struct { index []int typ reflect.Type omitEmpty bool + omitZero bool + isZero func(reflect.Value) bool quoted bool encoder encoderFunc } +type isZeroer interface { + IsZero() bool +} + +var isZeroerType = reflect.TypeFor[isZeroer]() + // typeFields returns a list of fields that JSON should recognize for the given type. // The algorithm is breadth-first search over the set of structs to include - the top struct // and then any reachable anonymous structs. @@ -1135,6 +1155,7 @@ func typeFields(t reflect.Type) structFields { index: index, typ: ft, omitEmpty: opts.Contains("omitempty"), + omitZero: opts.Contains("omitzero"), quoted: quoted, } field.nameBytes = []byte(field.name) @@ -1144,6 +1165,40 @@ func typeFields(t reflect.Type) structFields { field.nameEscHTML = `"` + string(nameEscBuf) + `":` field.nameNonEsc = `"` + field.name + `":` + if field.omitZero { + t := sf.Type + // Provide a function that uses a type's IsZero method. + switch { + case t.Kind() == reflect.Interface && t.Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + // Avoid panics calling IsZero on a nil interface or + // non-nil interface with nil pointer. + return v.IsNil() || + (v.Elem().Kind() == reflect.Pointer && v.Elem().IsNil()) || + v.Interface().(isZeroer).IsZero() + } + case t.Kind() == reflect.Pointer && t.Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + // Avoid panics calling IsZero on nil pointer. + return v.IsNil() || v.Interface().(isZeroer).IsZero() + } + case t.Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + return v.Interface().(isZeroer).IsZero() + } + case reflect.PointerTo(t).Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + if !v.CanAddr() { + // Temporarily box v so we can take the address. + v2 := reflect.New(v.Type()).Elem() + v2.Set(v) + v = v2 + } + return v.Addr().Interface().(isZeroer).IsZero() + } + } + } + fields = append(fields, field) if count[f.typ] > 1 { // If there were multiple instances, add a second, diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go index 48fc4d9453..cc2108b927 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go @@ -31,8 +31,8 @@ func NewDecoder(r io.Reader) *Decoder { return &Decoder{r: r} } -// UseNumber causes the Decoder to unmarshal a number into an interface{} as a -// [Number] instead of as a float64. +// UseNumber causes the Decoder to unmarshal a number into an +// interface value as a [Number] instead of as a float64. func (dec *Decoder) UseNumber() { dec.d.useNumber = true } // DisallowUnknownFields causes the Decoder to return an error when the destination