How to use k8sobjectsreceiver

Splunk/Observability

by 야솔아빠 2023. 10. 5. 22:11

https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8sobjectsreceiver#getting-started

Proceed by following the guide at the link above.

Configuration

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: otelcontribcol
  labels:
    app: otelcontribcol
data:
  config.yaml: |
    receivers:
      k8sobjects:
        objects:
          - name: pods
            mode: pull
          - name: events
            mode: watch
    exporters:
      otlp:
        endpoint: <OTLP_ENDPOINT>
        tls:
          insecure: true

    service:
      pipelines:
        logs:
          receivers: [k8sobjects]
          exporters: [otlp]
EOF

Example <OTLP_ENDPOINT>: https://ingest.us1.signalfx.com/

In other words, change the realm to match your environment.
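
A minimal sketch of substituting the realm and applying the manifest (this assumes the ConfigMap above is saved as otelcontribcol.yaml with the <OTLP_ENDPOINT> placeholder left in place; REALM is a value you choose):

REALM=us1   # your Splunk Observability realm, e.g. us0, us1, eu0
sed "s|<OTLP_ENDPOINT>|https://ingest.${REALM}.signalfx.com|" otelcontribcol.yaml | kubectl apply -f -
kubectl get configmap otelcontribcol   # verify the ConfigMap was created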

Deployment
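
The values.yaml below is used with the splunk-otel-collector Helm chart rather than a raw Deployment manifest. A minimal install sketch, assuming the file is saved as values.yaml (the repo URL is the chart's public Helm repository):

helm repo add splunk-otel-collector-chart https://signalfx.github.io/splunk-otel-collector-chart
helm repo update
helm install splunk-otel-collector -f values.yaml splunk-otel-collector-chart/splunk-otel-collector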

# Configurable parameters and default values for splunk-otel-collector.
# This is a YAML-formatted file.
# Declared variables will be passed into templates.

# nameOverride replaces the name of the chart, when this is used to construct
# Kubernetes object names.
nameOverride: ""
# fullnameOverride completely replaces the generated name.
fullnameOverride: ""

################################################################################
# clusterName is REQUIRED. It can be set to an arbitrary value that identifies
# your K8s cluster. The value will be associated with every trace, metric and
# log as "k8s.cluster.name" attribute.
################################################################################

clusterName: "test-eks-cluster"

################################################################################
# Splunk Cloud / Splunk Enterprise configuration.
################################################################################

# Specify `endpoint` and `token` in order to send data to Splunk Cloud or Splunk
# Enterprise.
splunkPlatform:
  # Required for Splunk Enterprise/Cloud. URL to a Splunk instance to send data
  # to. e.g. "http://X.X.X.X:8088/services/collector/event". Setting this parameter
  # enables Splunk Platform as a destination. Use the /services/collector/event
  # endpoint for proper extraction of fields.
  endpoint: "https://http-inputs-<STACK_NAME>.splunkcloud.com:443/services/collector"
  # Required for Splunk Enterprise/Cloud (if `endpoint` is specified). Splunk
  # HTTP Event Collector token.
  # Alternatively the token can be provided as a secret.
  # Refer to https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#provide-tokens-as-a-secret
  token: "xxxxxxxx"
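  # A quick HEC connectivity check before deploying (a sketch; substitute your
  # real stack name and token):
  #   curl -k "https://http-inputs-<STACK_NAME>.splunkcloud.com:443/services/collector" \
  #     -H "Authorization: Splunk xxxxxxxx" \
  #     -d '{"event": "HEC smoke test"}'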

  # Name of the Splunk event type index targeted. Required when ingesting logs to Splunk Platform.
  index: "dev_k8s_event"
  # Name of the Splunk metric type index targeted. Required when ingesting metrics to Splunk Platform.
  metricsIndex: "dev_k8s_metric"
  # Name of the Splunk event type index targeted. Required when ingesting traces to Splunk Platform.
  tracesIndex: ""
  # Optional. Default value for `source` field.
  source: "kubernetes"
  # Optional. Default value for `sourcetype` field. For container logs, it will
  # be container name.
  sourcetype: ""
  # Maximum HTTP connections to use simultaneously when sending data.
  maxConnections: 200
  # Whether to disable gzip compression over HTTP. Defaults to true.
  disableCompression: true
  # HTTP timeout when sending data. Defaults to 10s.
  timeout: 10s
  # Idle connection timeout. Defaults to 10s.
  idleConnTimeout: 10s
  # Whether to skip checking the certificate of the HEC endpoint when sending
  # data over HTTPS.
  insecureSkipVerify: false
  # The PEM-format CA certificate for this client.
  # Alternatively the clientCert, clientKey and caFile can be provided as a secret.
  # Refer to https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#provide-tokens-as-a-secret
  # NOTE: The content of the certificate itself should be used here, not the
  #       file path. The certificate will be stored as a secret in kubernetes.
  clientCert: ""
  # The private key for this client.
  # NOTE: The content of the key itself should be used here, not the file path.
  #       The key will be stored as a secret in kubernetes.
  clientKey: ""
  # The PEM-format CA certificate file.
  # NOTE: The content of the file itself should be used here, not the file path.
  #       The file will be stored as a secret in kubernetes.
  caFile: ""

  # Options to disable or enable particular telemetry data types that will be sent to
  # Splunk Platform. Only logs collection is enabled by default.
  logsEnabled: true
  # If you enable metrics collection, make sure that `metricsIndex` is provided as well.
  metricsEnabled: false
  # If you enable traces collection, make sure that `tracesIndex` is provided as well.
  tracesEnabled: false
  # Field name conventions to use. (Only for those who are migrating from Splunk Connect for Kubernetes helm chart)
  fieldNameConvention:
    # Boolean for renaming pod metadata fields to match to Splunk Connect for Kubernetes helm chart.
    renameFieldsSck: false
    # Boolean for keeping Otel convention fields after renaming it
    keepOtelConvention: true

  # Refer to https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md#configuration
  # for detailed examples
  retryOnFailure:
    enabled: true
    # Time to wait after the first failure before retrying; ignored if enabled is false
    initialInterval: 5s
    # The upper bound on backoff; ignored if enabled is false
    maxInterval: 30s
    # The maximum amount of time spent trying to send a batch; ignored if enabled is false
    maxElapsedTime: 300s

  sendingQueue:
    enabled: true
    # Number of consumers that dequeue batches; ignored if enabled is false
    numConsumers: 10
    # Maximum number of batches kept in memory before dropping; ignored if enabled is false
    # User should calculate this as num_seconds * requests_per_second where:
    #   num_seconds is the number of seconds to buffer in case of a backend outage
    #   requests_per_second is the average number of requests per seconds.
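    #   Example (hypothetical numbers): to buffer 60 seconds of a stream averaging
    #   100 requests per second, set queueSize = 60 * 100 = 6000.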
    queueSize: 5000

    # This option enables the persistent queue to store data on the disk instead of memory before sending it to the backend.
    # It allows setting higher queue limits and preserving the data across restarts of the collector container.
    # NOTE: The File Storage extension will persist state to the node's local file system.
    # While using the persistent queue it is advised to increase memory limit for agent (agent.resources.limits.memory)
    # to 1Gi.
    # Refer to: https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#data-persistence
    persistentQueue:
      # Specifies whether to persist log/metric/trace data.
      enabled: false
      storagePath: "/var/addon/splunk/exporter_queue"

################################################################################
# Splunk Observability configuration
################################################################################

# Specify `realm` and `accessToken` to send telemetry data to Splunk Observability
# Cloud.
splunkObservability:
  # Required for Splunk Observability. Splunk Observability realm to send
  # telemetry data to. Setting this parameter enables Splunk Observability as a
  # destination.
  realm: "us1"
  # Required for Splunk Observability (if `realm` is specified). Splunk
  # Observability org access token.
  # Alternatively the accessToken can be provided as a secret.
  # Refer to https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#provide-tokens-as-a-secret
  accessToken: "xxxx"

  # Optional. Splunk Observability ingest URL, default:
  # "https://ingest.<realm>.signalfx.com".
  ingestUrl: "https://ingest.us1.signalfx.com"
  # Optional. Splunk Observability API URL, default:
  # "https://api.<realm>.signalfx.com".
  apiUrl: "https://api.us1.signalfx.com"

  # Options to disable or enable particular telemetry data types.
  metricsEnabled: true
  tracesEnabled: true
  logsEnabled: false

  # Option to send Kubernetes events to Splunk Observability Infrastructure Monitoring as data events:
  # https://docs.splunk.com/Observability/alerts-detectors-notifications/view-data-events.html
  # To send Kubernetes events to Splunk Observability Log Observer, configure clusterReceiver.k8sObjects
  # and set splunkObservability.logsEnabled to true.
  infrastructureMonitoringEventsEnabled: false

  # This option just enables the shared pipeline for logs and profiling data.
  # There is no active collection of profiling data.
  # Instrumentation libraries must be configured to send it to the collector.
  # If you don't use AlwaysOn Profiling for Splunk APM, you can disable it.
  profilingEnabled: false

################################################################################
# Logs collection engine:
# - `fluentd`: deploy a fluentd sidecar that will collect logs and send them to
#   otel-collector agent for further processing.
# - `otel`: utilize native OpenTelemetry log collection.
#
# `fluentd` will be deprecated soon, so it's recommended to use `otel` instead.
################################################################################

logsEngine: otel

################################################################################
# Cloud provider, if any, the collector is running on. Leave empty for none/other.
# - "aws" (Amazon Web Services)
# - "gcp" (Google Cloud Platform)
# - "azure" (Microsoft Azure)
################################################################################

cloudProvider: "aws"

################################################################################
# Kubernetes distribution being run. Leave empty for other.
# - "aks" (Azure Kubernetes Service)
# - "eks" (Amazon Elastic Kubernetes Service)
# - "eks/fargate" (Amazon Elastic Kubernetes Service with Fargate profiles )
# - "gke" (Google Kubernetes Engine / Standard mode)
# - "gke/autopilot" (Google Kubernetes Engine / Autopilot mode)
# - "openshift" (RedHat OpenShift)
################################################################################

distribution: "eks"

################################################################################
# Optional "environment" parameter that will be added to all the telemetry
# data (traces/logs/metrics) as an attribute. It will allow Splunk Observability
# users to investigate data coming from different sources separately.
# See: https://docs.splunk.com/observability/apm/set-up-apm/environments.html#setting-the-deployment-environment-span-tag
################################################################################

# environment: production

################################################################################
# Optional: Automatic detection of additional metric sources.
# Set autodetect.prometheus=true if you want the otel-collector agent to scrape
# prometheus metrics from pods that have prometheus-style annotations like
# "prometheus.io/scrape".
# Set autodetect.istio=true in istio environment.
################################################################################

autodetect:
  prometheus: false
  # This option is recommended for istio environments. It does the following things:
  # - Enables scraping istio control plane metrics from Prometheus endpoints.
  # - Adds a `service.name` resource attribute to logs with the same value as istio generates for
  #   traces to enable correlation between logs and traces using this attribute.
  istio: false

################################################################################
# Optional: Configuration for additional metadata that will be added to all the
# telemetry as extra attributes.
# IMPORTANT: Additional attributes configured with `fromLabels` and
# `fromAttributes` options are only applied to traces and logs. Pod labels are
# always sent to Splunk Observability (if enabled) as metric properties.
################################################################################

extraAttributes:

  # Labels that will be collected from k8s pods (or namespaces) (in case they are set)
  # and added as extra attributes to the telemetry in the following format:
  # k8s.<pod|namespace>.labels.<label_name>: <label_value>
  #  For example, if you want to collect "my_key" label from your namespaces, you could use the following:
  #  fromLabels:
  #    - key: my_key
  #      from: namespace
  #
  #  If you want to change the default attribute name `k8s.pod.labels.<label_name>`, you could do that using a `tag_name` field:
  #  fromLabels:
  #    - key: my_key
  #      tag_name: my_tag
  #      from: pod
  #
  #  `key_regex` field can be used to get a specific set of labels that match a regex.
  #  If `key_regex` is used, the `key` field accepts regexp matching groups.
  #  The following example will fetch all the pod labels and propagate them to the attributes as is,
  #  without "k8s.pod.labels." prefix. "$" from the matching group must be escaped as "$$".
  #  fromLabels:
  #    - key_regex: (.*)
  #      from: pod
  #      tag_name: "$$1"
  fromLabels:
    - key: app

  # Annotations that will be collected from k8s pods (or namespaces) (in case they are set)
  # and added as extra attributes to the telemetry in the following format:
  # k8s.<pod|namespace>.annotations.<annotation_name>: <annotation_value>
  # fromAnnotations uses the same extraction rules as fromLabels option so refer examples from the fromLabels option.
  fromAnnotations: []

  # List of hardcoded key/value pairs that will be added as attributes to
  # all the telemetry.
  custom: []
    # - name: "account_id"
    #   value: "1234567890"

################################################################################
# OPTIONAL CONFIGURATIONS OF PARTICULAR O11Y COLLECTOR COMPONENTS
################################################################################

################################################################################
# OpenTelemetry collector running as a daemonset agent on every node.
# It collects metrics and traces and sends them to the SignalFx backend.
################################################################################

agent:
  enabled: true

  # Metric collection from k8s control plane components.
  # For control plane configuration details see: docs/advanced-configuration.md#control-plane-metrics
  controlPlaneMetrics:
    apiserver:
      # Specifies whether to collect apiserver metrics.
      enabled: true
    controllerManager:
      # Specifies whether to collect controller manager metrics.
      enabled: true
    coredns:
      # Specifies whether to collect coredns metrics.
      enabled: true
    etcd:
      # Specifies whether to collect etcd metrics.
      # For details on setting up etcd metrics see: docs/advanced-configuration.md#setting-up-etcd-metrics
      enabled: false
      secret:
        # The name of the secret the helm chart will create (if name is empty the default name is used) or the name
        # of a secret that the user created (empty names are not valid for user created secrets).
        name: ""
        # Option for creating a new secret or using an existing one.
        # When secret.create=true, a new kubernetes secret will be created by the helm chart that will contain the
        # values from clientCert, clientKey, and caFile.
        # When secret.create=false, the user must set secret.name to a name of a k8s secret the user created.
        create: false
        # Used when secret.create=true. The PEM-format CA certificate for the etcd client.
        # NOTE: The content of the certificate itself should be used here, not the
        #       file path. The certificate will be stored as a secret in kubernetes.
        clientCert: ""
        # Used when secret.create=true. The private key for the etcd client.
        # NOTE: The content of the key itself should be used here, not the file path.
        #       The key will be stored as a secret in kubernetes.
        clientKey: ""
        # Optional. Used when secret.create=true and skipVerify=false. The PEM-format CA certificate file.
        # NOTE: The content of the file itself should be used here, not the file path.
        #       The file will be stored as a secret in kubernetes.
        caFile: ""
      # Specifies whether the etcd's TLS cert will be verified. If set to false, a CA certificate must be made
      # available as part of the etcd secret to verify the TLS cert with.
      skipVerify: true
    proxy:
      # Specifies whether to collect proxy metrics.
      enabled: true
    scheduler:
      # Specifies whether to collect scheduler metrics.
      enabled: true

  # The ports to be exposed by the agent to the host.
  # Make sure that only necessary ports are exposed; the <hostIP, hostPort, protocol> combination must
  # be unique across all the nodes in the k8s cluster. Any port can be disabled;
  # for example, to disable the zipkin ports set `agent.ports.zipkin: null`.
  ports:
    otlp:
      containerPort: 4317
      hostPort: 4317
      protocol: TCP
      enabled_for: [traces, metrics, logs, profiling]
    otlp-http:
      containerPort: 4318
      protocol: TCP
      enabled_for: [metrics, traces, logs, profiling]
    otlp-http-old:
      containerPort: 55681
      protocol: TCP
      enabled_for: [metrics, traces, logs]
    sfx-forwarder:
      containerPort: 9080
      hostPort: 9080
      protocol: TCP
      enabled_for: [traces]
    zipkin:
      containerPort: 9411
      hostPort: 9411
      protocol: TCP
      enabled_for: [traces]
    jaeger-thrift:
      containerPort: 14268
      hostPort: 14268
      protocol: TCP
      enabled_for: [traces]
    jaeger-grpc:
      containerPort: 14250
      hostPort: 14250
      protocol: TCP
      enabled_for: [traces]
    fluentforward:
      containerPort: 8006
      hostPort: 8006
      protocol: TCP
      enabled_for: [logs]
    signalfx:
      containerPort: 9943
      hostPort: 9943
      protocol: TCP
      enabled_for: [metrics]

  resources:
    limits:
      cpu: 200m
      # This value is being used as a source for default memory_limiter processor configurations
      memory: 500Mi

  # To collect container logs and journald logs, the agent runs as the root user.
  # To run it as a non-root user, uncomment the `securityContext` options below.
  # Setting runAsUser and runAsGroup to a non root user enables an init container that patches group
  # permissions of container logs directories on the host filesystem to make logs readable by this non root user.

  securityContext: {}
  #   runAsUser: 20000
  #   runAsGroup: 20000

  # Specifies DaemonSet update strategy.
  # Possible values: "OnDelete" and "RollingUpdate".
  updateStrategy: RollingUpdate

  # Specifies the maximum number of pods that can be unavailable during the update process.
  # Applicable only when updateStrategy is set to "RollingUpdate".
  # Can be an absolute number or a percentage. The default is 1.
  maxUnavailable: 1

  service:
    # Create a service for the agents with a local internalTrafficPolicy
    # so that agent pods can be discovered via DNS, etc.
    enabled: false

  # OTel agent annotations
  annotations: {}
  podAnnotations: {}

  # OTel agent extra pod labels
  podLabels: {}

  # Extra environment variables to be set in the OTel agent container
  extraEnvs: []

  # Extra volumes to be mounted to the agent daemonset.
  # The volumes will be available for both OTel agent and fluentd containers.
  extraVolumes: []
  extraVolumeMounts: []

  # Enable or disable features of the agent.
  featureGates: ""

  # OpenTelemetry Collector configuration for otel-agent daemonset can be overridden in this field.
  # Default configuration defined in templates/config/_otel-agent.tpl
  # Any additional fields will be merged into the defaults,
  # existing fields can be disabled by setting them to null value.
  config: {}

################################################################################
# OpenTelemetry Kubernetes cluster receiver
# This is an extra 1-replica deployment of the OpenTelemetry collector used
# specifically for collecting metrics from the Kubernetes API.
################################################################################

# Cluster receiver collects cluster level metrics from the Kubernetes API.
# It has to be running on one pod, so it uses its own dedicated deployment with 1 replica.

clusterReceiver:
  enabled: true

  # Needs to be adjusted based on the size of the monitored cluster
  resources:
    limits:
      cpu: 200m
      memory: 500Mi

  # Scheduling configurations
  nodeSelector: {}
  tolerations: []
  affinity: {}

  # Pod configurations
  securityContext: {}
  terminationGracePeriodSeconds: 600
  priorityClassName: ""

  # k8s cluster receiver collector annotations
  annotations: {}
  podAnnotations: {}

  # This flag enables Kubernetes events collection using OpenTelemetry Kubernetes Events Receiver
  # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8seventsreceiver
  # This option requires `logsEnabled` to be set to `true` for either `splunkObservability` or `splunkPlatform`
  # depending on where you want to send the events. Otherwise this option will not have any effect.
  # The receiver currently is in alpha state which means that events format might change over time.
  # Once the receiver is stabilized, it'll be enabled by default in this helm chart
  eventsEnabled: true

  # Kubernetes objects collection using OpenTelemetry Kubernetes Object Receiver
  # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8sobjectsreceiver
  # This option requires `logsEnabled` to be set to `true` for either `splunkObservability` or `splunkPlatform`
  # depending on where you want to send the events. Otherwise, this option will not have any effect.
  # The receiver currently is in alpha state which means that events format might change over time.
  # Once the receiver is stabilized, it'll be enabled by default in this helm chart

  #
  # == Schema ==
  # ```
  # k8sObjects:
  #   - <objectDefinition>
  # ```
  # Each `objectDefinition` has the following fields:
  # * mode:
  #     define in which way it collects this type of object, either "pull" or "watch".
  #     - "pull" mode will read all objects of this type use the list API at an interval. Default mode.
  #     - "watch" mode will setup a long connection using the watch API to just get updates.
  # * name: [REQUIRED]
  #     name of the object, e.g. `pods`, `namespaces`.
  # * namespace:
  #     only collects objects from the specified namespace; by default it's all namespaces
  # * labelSelector:
  #     select objects by label(s)
  # * fieldSelector:
  #     select objects by field(s)
  # * interval:
  #     the interval at which objects are pulled; defaults to 60 seconds.
  #     Only useful for "pull" mode.
  #
  #
  # == Example ==
  # ```
  #  k8sObjects:
  #    - name: pods
  #      mode: pull
  #      label_selector: environment in (production),tier in (frontend)
  #      field_selector: status.phase=Running
  #      interval: 15m
  #    - name: events
  #      mode: watch
  #      group: events.k8s.io
  #      namespaces: [default]
  # ```
  #
  # The configuration format is described in detail here:
  # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8sobjectsreceiver
  k8sObjects:
  #  - name: pods
  #    mode: pull
  #    label_selector: environment in (production),tier in (frontend)
  #    field_selector: status.phase=Running
  #    interval: 60s
    - name: events
      mode: watch
      group: events.k8s.io
      namespaces: [default]
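  # Note: with `namespaces: [default]` above, only events from the default
  # namespace are watched. Per the receiver docs linked above, the default when
  # no namespace is specified is all namespaces.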

  # k8s cluster receiver extra pod labels
  podLabels: {}

  # Extra environment variables to be set in the OTel Cluster Receiver container
  extraEnvs: []

  # Extra volumes to be mounted to the k8s cluster receiver container.
  extraVolumes: []
  extraVolumeMounts: []

  # Enable or disable features of the cluster receiver.
  featureGates: ""

  # OpenTelemetry Collector configuration for K8s Cluster Receiver deployment can be overridden in this field.
  # Default configuration defined in templates/config/_otel-k8s-cluster-receiver-config.tpl
  # Any additional fields will be merged into the defaults,
  # existing fields can be disabled by setting them to null value.
  config: {}

#################################################################
# Native OpenTelemetry logs collection
# Disabled by default in favor of fluentd.
# Can be enabled by setting "logsEngine: otel".
# Receiver Documentation: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver
# OpenTelemetry Logging Documentation: https://opentelemetry.io/docs/specs/otel/logs
#################################################################

logsCollection:

  # Container logs collection
  containers:
    enabled: true
    # Container runtime. One of `docker`, `cri-o`, or `containerd`
    # Automatically discovered if not set.
    containerRuntime: ""
    # Paths of logfiles to exclude. The object type is array;
    # e.g. to exclude the `kube-system` namespace:
    # excludePaths: ["/var/log/pods/kube-system_*/*/*.log"]
    excludePaths:
    - "/var/log/pods/amazon-cloudwatch_*/*/*.log"
    - "/var/log/pods/argo-rollouts_*/*/*.log"
    - "/var/log/pods/argocd_*/*/*.log"
    - "/var/log/pods/awx_*/*/*.log"
    - "/var/log/pods/concurency-check_*/*/*.log"
    - "/var/log/pods/default_*/*/*.log"
    - "/var/log/pods/goldilocks_*/*/*.log"
    - "/var/log/pods/datadog_*/*/*.log"
    - "/var/log/pods/jenkins_*/*/*.log"
    - "/var/log/pods/kube-node-lease_*/*/*.log"
    - "/var/log/pods/kube-public_*/*/*.log"
    - "/var/log/pods/istio-system_*/*/*.log"
    - "/var/log/pods/kube-system_*/*/*.log"
    - "/var/log/pods/logging_*/*/*.log"
    - "/var/log/pods/prometheus_*/*/*.log"
    - "/var/log/pods/splunk_*/*/*.log"
    - "/var/log/pods/ui-client_*/*/*.log"
    - "/var/log/pods/whatap-monitoring_*/*/*.log"
    - "/var/log/pods/whatap-agent_*/*/*.log"    
    # Boolean for ingesting the agent's own log
    excludeAgentLogs: true
    # Extra operators for container logs.
    # https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/stanza/docs/operators/README.md
    extraOperators: []

    # Multiline logs processing configuration. Multiline logs that are written by containers
    # to stdout are usually broken down into several one-line logs and can be reconstructed
    # with a regex that matches the first line of each logs batch. The following operator is
    # being utilized for this purpose:
    # https://github.com/open-telemetry/opentelemetry-log-collection/blob/main/docs/operators/recombine.md
    # At the time of reconstructing a multiline log the following information is available to
    # identify the source of the logs: namespace, pod and container names. At least one source
    # identifier has to be specified for each multiline config.
    # The following example shows how to set up multiline log processing for logs having subsequent
    # log lines written with an offset. Let's say a k8s deployment called "buttercup-app" is
    # scheduled to run in "default" namespace with a java container called "server", and the
    # container produces the following log example:
    #  .........
    #  Exception in thread "main" java.lang.NumberFormatException: For input string: "3.1415"
    #      at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)
    #      at java.lang.Integer.parseInt(Integer.java:580)
    #      at ExampleCli.parseNumericArgument(ExampleCli.java:47)
    #      at ExampleCli.parseCliOptions(ExampleCli.java:27)
    #      at ExampleCli.main(ExampleCli.java:11)
    #  .........
    # The following sample configuration will handle multiline logs from that specific container:
    # multilineConfigs:
    #   - namespaceName:
    #       value: default
    #     podName:
    #       value: buttercup-app-.*
    #       useRegexp: true
    #     containerName:
    #       value: server
    #     firstEntryRegex: ^[^\s].*
    multilineConfigs: []
    # Set the useSplunkIncludeAnnotation flag to `true` to collect logs only from pods with the `splunk.com/include: true` annotation.
    # All other logs will be ignored.
    useSplunkIncludeAnnotation: false
    # maxRecombineLogSize sets the maximum size in bytes of a message recombined from cri-o, containerd and docker log entries.
    # Set to 0 to remove any size limit.
    maxRecombineLogSize: 1048576

  # Configuration for collecting journald logs using the otel collector
  journald:
    enabled: false
    # Please update the directory path for journald if it's different from the default value below, "/var/log/journal"
    directory: /var/log/journal
    # List of service units to collect journald logs for and configuration for each.
    units:
      - name: kubelet
        priority: info
      - name: docker
        priority: info
      - name: containerd
        priority: info
    # Route journald logs to their own Splunk index by specifying the index value below; otherwise leave it blank. Please make sure the index exists in Splunk and is configured to receive HEC traffic. Not applicable to Splunk Observability.
    index: ""

  checkpointPath: "/var/addon/splunk/otel_pos"

  # Files on k8s nodes to tail.
  # Make sure to configure volume mounts properly at `agent.extraVolumes` and `agent.extraVolumeMounts`.
  extraFileLogs: {}
  # Sample configuration to collect Audit logs. Please note hostPath can vary depending on the audit-policy.yaml configuration.
  # extraFileLogs:
  #   filelog/audit-log:
  #     include: [/var/log/kubernetes/apiserver/audit.log]
  #     start_at: beginning
  #     include_file_path: true
  #     include_file_name: false
  #     resource:
  #       com.splunk.source: /var/log/kubernetes/apiserver/audit.log
  #       host.name: 'EXPR(env("K8S_NODE_NAME"))'
  #       com.splunk.sourcetype: kube:apiserver-audit

################################################################################
# Fluentd sidecar configuration for logs collection.
# Note: as stated in the logs collection engine section above, fluentd is being
# deprecated in favor of the native otel logs collection (`logsEngine: otel`).
################################################################################

fluentd:
  resources:
    limits:
      cpu: 500m
      memory: 500Mi
    requests:
      cpu: 100m
      memory: 200Mi

  securityContext:
    runAsUser: 0

  # Extra environment variables to be set in the FluentD container
  extraEnvs: []

  config:
    # Configurations for container logs
    containers:
      # Path to root directory of container logs
      path: /var/log
      # Final volume destination of container log symlinks
      pathDest: /var/lib/docker/containers
      # Log format type, "json" or "cri".
      # If omitted (default), the value is detected automatically based on container runtime.
      # "json" is set if docker runtime detected, otherwise it defaults to "cri".
      logFormatType: ""
      # Specify the log format for "cri" logFormatType
      # It can be "%Y-%m-%dT%H:%M:%S.%N%:z" for openshift and "%Y-%m-%dT%H:%M:%S.%NZ" for IBM IKS
      criTimeFormat: "%Y-%m-%dT%H:%M:%S.%N%:z"

    # Directory where to read journald logs. (docker daemon logs, kubelet logs, and any other specified service logs)
    journalLogPath: /run/log/journal

    # Controls the output buffer for the fluentd daemonset
    # Note that, for the memory buffer, if `resources.limits.memory` is set,
    # the total buffer size should not be bigger than the memory limit; it should also
    # account for the basic memory usage by fluentd itself.
    # All buffer parameters (except Argument) defined in
    # https://docs.fluentd.org/v1.0/articles/buffer-section#parameters
    # can be configured here.
    buffer:
      "@type": memory
      total_limit_size: 600m
      chunk_limit_size: 1m
      chunk_limit_records: 100000
      flush_interval: 5s
      flush_thread_count: 1
      overflow_action: block
      retry_max_times: 3

    # logLevel sets the log level of the Splunk log collector.
    # Available values are: trace, debug, info, warn, error
    logLevel: info

    # Path of logfiles, default /var/log/containers/*.log
    path: /var/log/containers/*.log
    # Paths of logfiles to exclude. The object type is array, as per the fluentd specification:
    # https://docs.fluentd.org/input/tail#exclude_path
    excludePath: []
    #  - /var/log/containers/kube-svc-redirect*.log
    #  - /var/log/containers/tiller*.log

    # Prefix for pos_file tail source parameter
    # Can be used if you want to run multiple instances of fluentd on the same host
    # https://docs.fluentd.org/input/tail#pos_file-highly-recommended
    posFilePrefix: /var/log/splunk-fluentd

    # `customFilters` defines the custom filters to be used.
    # This section can be used to define custom filters using plugins like https://github.com/splunk/fluent-plugin-jq
    # It's also possible to use other filters like https://www.fluentd.org/plugins#filter
    #
    # The scheme to define a custom filter is:
    #
    # ```
    # <name>:
    #   tag: <fluentd tag for the filter>
    #   type: <fluentd filter type>
    #   body: <definition of the fluentd filter>
    # ```
    #
    # = fluentd tag for the filter =
    # This is the fluentd tag for the record
    #
    # = fluentd filter type =
    # This is the fluentd filter that the user wants to use for record manipulation.
    #
    # = definition of the fluentd filter =
    # This defines the body/logic for using the filter for record manipulation.
    #
    # For example, if you want to define a filter which sets the cluster_name field to "my_awesome_cluster", you would use the following filter:
    # <filter tail.containers.**>
    #  @type jq_transformer
    #  jq '.record.cluster_name = "my_awesome_cluster" | .record'
    # </filter>
    # This can be defined in the customFilters section as follows:
    # ```
    # customFilters:
    #   NamespaceSourcetypeFilter:
    #     tag: tail.containers.**
    #     type: jq_transformer
    #     body: jq '.record.cluster_name = "my_awesome_cluster" | .record'
    # ```
    customFilters: {}

    # `logs` defines the source of logs, multiline support, and their sourcetypes.
    #
    # The scheme to define a log is:
    #
    # ```
    # <name>:
    #   from:
    #     <source>
    #   timestampExtraction:
    #     regexp: "<regexp_to_extract_timestamp_from_log>"
    #     format: "<format_of_the_timestamp>"
    #   multiline:
    #     firstline: "<regexp_to_detect_firstline_of_multiline>"
    #     flushInterval: 5s
    #   sourcetype: "<sourcetype_of_logs>"
    # ```
    #
    # = <source> =
    # It supports 3 kinds of sources: journald, file, and container.
    # For `journald` logs, `unit` is required for filtering using _SYSTEMD_UNIT, example:
    # ```
    # docker:
    #   from:
    #     journald:
    #       unit: docker.service
    # ```
    #
    # For `file` logs, `path` is required for specifying where the log files are. Log files are expected in `/var/log`, example:
    # ```
    # docker:
    #   from:
    #     file:
    #       path: /var/log/docker.log
    # ```
    #
    # For `container` logs, `pod` field is required. It represents part of
    # the pod name, can be name of a deployment or replica set. Use "*" to
    # apply the configuration to all pods. Optional `container` value can be
    # used to apply configuration to a particular container.
    # ```
    # kube-apiserver:
    #   from:
    #     pod: kube-apiserver
    #
    # etcd:
    #   from:
    #     pod: etcd-server
    #     container: etcd-container
    # ```
    #
    # = timestamp =
    # `timestampExtraction` defines how to extract timestamp from logs. This *only* works for `file` source.
    # To use `timestampExtraction` you need to define both:
    # - `regexp`: the Regular Expression used to find the timestamp from a log entry.
    #             The timestamp part must be in a `time` named group. E.g.
    #             (?<time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})
    # - `format`: a format string that defines how to parse the timestamp, e.g. "%Y-%m-%d %H:%M:%S".
    #             More details can be found at: http://ruby-doc.org/stdlib-2.5.0/libdoc/time/rdoc/Time.html#method-c-strptime
    #
    # = multiline =
    # `multiline` options provide basic multiline support. Two options:
    # - `firstline`: a Regular Expression used to detect the first line of a multiline log.
    # - `flushInterval`: The interval between data flushes, default value: 5s.
    #
    # = sourcetype =
    # sourcetype of each kind of log can be defined using the `sourcetype` field.
    # If `sourcetype` is not defined, `name` will be used.
    #
    # ---
    # Here we have some default timestampExtraction and multiline settings for kubernetes components.
    # So, usually you just need to redefine the source of those components if necessary.
    logs:
      docker:
        from:
          journald:
            unit: docker.service
        timestampExtraction:
          regexp: time="(?<time>\d{4}-\d{2}-\d{2}T[0-2]\d:[0-5]\d:[0-5]\d.\d{9}Z)"
          format: "%Y-%m-%dT%H:%M:%S.%NZ"
        sourcetype: kube:docker
      kubelet: &glog
        from:
          journald:
            unit: kubelet.service
        timestampExtraction:
          regexp: \w(?<time>[0-1]\d[0-3]\d [^\s]*)
          format: "%m%d %H:%M:%S.%N"
        multiline:
          firstline: /^\w[0-1]\d[0-3]\d/
        sourcetype: kube:kubelet
      etcd:
        from:
          pod: etcd-server
          container: etcd-container
        timestampExtraction:
          regexp: (?<time>\d{4}-\d{2}-\d{2} [0-2]\d:[0-5]\d:[0-5]\d\.\d{6})
          format: "%Y-%m-%d %H:%M:%S.%N"
      etcd-minikube:
        from:
          pod: etcd-minikube
          container: etcd
        timestampExtraction:
          regexp: (?<time>\d{4}-\d{2}-\d{2} [0-2]\d:[0-5]\d:[0-5]\d\.\d{6})
          format: "%Y-%m-%d %H:%M:%S.%N"
      etcd-events:
        from:
          pod: etcd-server-events
          container: etcd-container
        timestampExtraction:
          regexp: (?<time>\d{4}-[0-1]\d-[0-3]\d [0-2]\d:[0-5]\d:[0-5]\d\.\d{6})
          format: "%Y-%m-%d %H:%M:%S.%N"
      kube-apiserver:
        <<: *glog
        from:
          pod: kube-apiserver
        sourcetype: kube:kube-apiserver
      kube-scheduler:
        <<: *glog
        from:
          pod: kube-scheduler
        sourcetype: kube:kube-scheduler
      kube-controller-manager:
        <<: *glog
        from:
          pod: kube-controller-manager
        sourcetype: kube:kube-controller-manager
      kube-proxy:
        <<: *glog
        from:
          pod: kube-proxy
        sourcetype: kube:kube-proxy
      kubedns:
        <<: *glog
        from:
          pod: kube-dns
        sourcetype: kube:kubedns
      dnsmasq:
        <<: *glog
        from:
          pod: kube-dns
        sourcetype: kube:dnsmasq
      dns-sidecar:
        <<: *glog
        from:
          pod: kube-dns
          container: sidecar
        sourcetype: kube:kubedns-sidecar
      dns-controller:
        <<: *glog
        from:
          pod: dns-controller
        sourcetype: kube:dns-controller
      kube-dns-autoscaler:
        <<: *glog
        from:
          pod: kube-dns-autoscaler
          container: autoscaler
        sourcetype: kube:kube-dns-autoscaler
      kube-audit:
        from:
          file:
            path: /var/log/kube-apiserver-audit.log
        timestampExtraction:
          format: "%Y-%m-%dT%H:%M:%SZ"
        sourcetype: kube:apiserver-audit

################################################################################
# Docker image configuration
################################################################################

image:
  # Secrets to attach to the respective serviceaccount to pull docker images
  imagePullSecrets: []

  fluentd:
    # The registry and name of the fluentd image to pull
    repository: splunk/fluentd-hec
    # The tag of the fluentd image to pull
    tag: 1.3.3
    # The policy that specifies when the user wants the fluentd images to be pulled
    pullPolicy: IfNotPresent

  otelcol:
    # The registry and name of the opentelemetry collector image to pull
    repository: quay.io/signalfx/splunk-otel-collector
    # The tag of the Splunk OTel Collector image, default value is the chart appVersion
    tag: ""
    # The policy that specifies when the user wants the opentelemetry collector images to be pulled
    pullPolicy: IfNotPresent

  # Image to be used by init container that patches log directories on the host, so the collector can read from them as a non-root user.
  # Effective only if `agent.securityContext.runAsUser` and `agent.securityContext.runAsGroup` are set to non-zero values.
  initPatchLogDirs:
    # The registry and name of the Universal Base Image 9 image to pull
    repository: registry.access.redhat.com/ubi9/ubi
    # The tag of the Universal Base Image 9, default value is latest
    tag: ""
    # The policy that specifies when the user wants the Universal Base images to be pulled
    pullPolicy: IfNotPresent


################################################################################
# Extra system configuration
################################################################################

## Limits how many pods may be unavailable due to voluntary disruptions.
## https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
  # Minimum number of pods (as a number or percentage) that must remain available.
  # minAvailable:
  # Maximum number of pods (as a number or percentage) that can be unavailable.
  # maxUnavailable:

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

  # Service account annotations
  annotations: {}

rbac:
  # Create or use existing RBAC resources
  create: true
  # Specifies additional rules that will be added to the clusterRole.
  customRules: []

# Create or use an existing secret. If name is empty, the default name is used.
secret:
  create: true
  name: ""
  # Specifies whether secret provided by user should be validated.
  validateSecret: true

# These default tolerations allow the daemonset to be deployed on control-plane
# nodes, so that we can also collect logs and metrics from those nodes.
tolerations:
  - key: node-role.kubernetes.io/master
    effect: NoSchedule
  - key: node-role.kubernetes.io/control-plane
    effect: NoSchedule

# Defines which nodes should be selected to deploy the o11y collector daemonset.
nodeSelector: {}
terminationGracePeriodSeconds: 600

# Defines node affinity to restrict deployment of the o11y collector daemonset.
affinity: {}

# Defines priorityClassName to assign a priority class to pods.
priorityClassName: ""

# This tells the kubelet that it should wait for x seconds before performing the first probe.
# This is required if you are using Windows worker nodes.
# It is recommended to keep it at a 60-second window, but it depends on the cluster specification.
readinessProbe:
  initialDelaySeconds: 0
livenessProbe:
  initialDelaySeconds: 0

# Specifies whether to apply this for a k8s cluster with Windows worker nodes.
isWindows: false

# Whether to automatically create the OpenShift SCC or to create it manually.
# NOTE: This config will only be used when distribution=openshift
securityContextConstraints:
  create: true

# OpenShift SecurityContextConstraints can be overridden in this field.
# These fields will be merged into the default config that can be found at
# https://github.com/signalfx/splunk-otel-collector-chart/blob/main/helm-charts/splunk-otel-collector/templates/securityContextConstraints.yaml
# NOTE: This config will only be used when distribution=openshift
securityContextConstraintsOverwrite: {}

################################################################################
# OpenTelemetry "collector" k8s deployment configuration.
# This is an additional deployment of the OpenTelemetry collector that can be used
# to pass traces through it, perform k8s metadata enrichment, and do batching.
# Another use case is to point tracing instrumentation libraries directly to
# the collector endpoint instead of local agents. The collector running in the
# passthrough mode is recommended for large k8s clusters, disabled by default.
################################################################################

gateway:
  # Defines if collector deployment is enabled
  # Recommended for large k8s clusters, disabled by default.
  enabled: false

  # Number of collector replicas
  replicaCount: 3

  # The ports exposed by the collector container.
  # Any port can be disabled by setting to null.
  # Any changes should be aligned with service.ports configuration below.
  ports:
    otlp:
      containerPort: 4317
      protocol: TCP
      enabled_for: [metrics, traces, logs]
    otlp-http:
      containerPort: 4318
      protocol: TCP
      enabled_for: [metrics, traces, logs]
    otlp-http-old:
      containerPort: 55681
      protocol: TCP
      enabled_for: [metrics, traces, logs]
    jaeger-thrift:
      containerPort: 14268
      protocol: TCP
      enabled_for: [traces]
    jaeger-grpc:
      containerPort: 14250
      protocol: TCP
      enabled_for: [traces]
    zipkin:
      containerPort: 9411
      protocol: TCP
      enabled_for: [traces]
    signalfx:
      containerPort: 9943
      protocol: TCP
      # SignalFx metrics enabled in gateway for all telemetry types since there may be
      # bundled metrics.
      enabled_for: [metrics, traces, logs]
    http-forwarder:
      containerPort: 6060
      protocol: TCP
      # Enabled for all because SignalFx exporter will always send metadata updates when enabled.
      enabled_for: [metrics, traces, logs]

  resources:
    limits:
      cpu: 4
      # Memory limit value is used as a source for default memory_limiter configuration
      memory: 8Gi

  # Scheduling configurations
  nodeSelector: {}
  tolerations: []
  affinity: {}

  # Pod configurations
  securityContext: {}
  terminationGracePeriodSeconds: 600
  priorityClassName: ""

  # OTel collector annotations
  annotations: {}
  podAnnotations: {}

  # OTel collector extra pod labels
  podLabels: {}

  # Extra environment variables to be set in the standalone OTel collector container
  extraEnvs: []

  # Extra volumes to be mounted to the OTel Collector container.
  extraVolumes: []
  extraVolumeMounts: []

  # Enable or disable features of the gateway.
  featureGates: ""

  # OpenTelemetry Collector configuration for standalone otel-collector deployment can be overridden in this field.
  # Default configuration defined in config/otel-collector-config.yaml
  # Any additional fields will be merged into the defaults,
  # existing fields can be disabled by setting them to `null`.
  config: {}

################################################################################
# OpenTelemetry service config, used for otel collector deployment.
# Disabled by default
################################################################################

# opentelemetry collector service created only if gateway.enabled = true
service:
  # Service type
  type: ClusterIP
  # Service annotations
  annotations: {}

# Default values for splunk-otel-network-explorer.
networkExplorer:
  enabled: false
  images:
    tag: latest-v0.9
    repository: quay.io/signalfx
    pullPolicy: Always

  imagePullSecrets: []

  log:
    console: false
    # possible values: { error | warning | info | debug | trace }
    level: warning

  debug:
    enabled: false
    storeMinidump: false
    sendUnplannedExitMetric: true

  kernelCollector:
    enabled: true
    image:
      name: splunk-network-explorer-kernel-collector
      # tag: latest-v0.9
      # repository: quay.io/signalfx
    nodeSelector: {}
    disableHttpMetrics: false

    serviceAccount:
      create: false
      name: ""

    tolerations:
    - operator: "Exists"
      effect: "NoExecute"
    - operator: "Exists"
      effect: "NoSchedule"

    affinity: {}
    resources: {}

    # uncomment the line below to disable automatic kernel headers fetching
    # fetchKernelHeaders: false

    # uncomment to enable enrichment using Docker metadata
    # useDockerMetadata: true

    # uncomment to enable enrichment using Nomad metadata (https://www.nomadproject.io/)
    # collectNomadMetadata: true

  cloudCollector:
    enabled: false
    image:
      name: splunk-network-explorer-cloud-collector
      # tag: latest-v0.9
      # repository: quay.io/signalfx
    serviceAccount:
      create: false
      name: ""
      iamRole: ""

  k8sCollector:
    enabled: true
    relay:
      image:
        name: splunk-network-explorer-k8s-relay
        # tag: latest-v0.9
        # repository: quay.io/signalfx
    watcher:
      image:
        name: splunk-network-explorer-k8s-watcher
        # tag: latest-v0.9
        # repository: quay.io/signalfx
    serviceAccount:
      create: false
      name: ""

  reducer:
    ingestShards: 1
    matchingShards: 1
    aggregationShards: 1
    disableInternalMetrics: true
    disableMetrics: []
      ### to disable an entire metric category: ###
      # - tcp.all
      # - udp.all
      # - dns.all
      # - http.all
      ### to disable an individual metric: ###
      ### tcp ###
      # - tcp.bytes
      # - tcp.rtt.num_measurements
      # - tcp.active
      # - tcp.rtt.average
      # - tcp.packets
      # - tcp.retrans
      # - tcp.syn_timeouts
      # - tcp.new_sockets
      # - tcp.resets
      ### udp ###
      # - udp.bytes
      # - udp.packets
      # - udp.active
      # - udp.drops
      ### dns ###
      # - dns.client.duration.average
      # - dns.server.duration.average
      # - dns.active_sockets
      # - dns.responses
      # - dns.timeouts
      ### http ###
      # - http.client.duration.average
      # - http.server.duration.average
      # - http.active_sockets
      # - http.status_code
      ### ebpf_net ###
      # - ebpf_net.span_utilization_fraction
      # - ebpf_net.pipeline_metric_bytes_discarded
      # - ebpf_net.codetiming_min_ns
      # - ebpf_net.entrypoint_info
      # - ebpf_net.otlp_grpc.requests_sent
      # - ebpf_net.connections
      # - ebpf_net.rpc_queue_elem_utilization_fraction
      # - ebpf_net.disconnects
      # - ebpf_net.codetiming_avg_ns
      # - ebpf_net.client_handle_pool
      # - ebpf_net.otlp_grpc.successful_requests
      # - ebpf_net.span_utilization
      # - ebpf_net.up
      # - ebpf_net.rpc_queue_buf_utilization_fraction
      # - ebpf_net.collector_log_count
      # - ebpf_net.time_since_last_message_ns
      # - ebpf_net.bpf_log
      # - ebpf_net.codetiming_count
      # - ebpf_net.message
      # - ebpf_net.otlp_grpc.bytes_sent
      # - ebpf_net.pipeline_message_error
      # - ebpf_net.pipeline_metric_bytes_written
      # - ebpf_net.codetiming_max_ns
      # - ebpf_net.span_utilization_max
      # - ebpf_net.client_handle_pool_fraction
      # - ebpf_net.span_utilization_fraction
      # - ebpf_net.rpc_latency_ns
      # - ebpf_net.agg_root_truncation
      # - ebpf_net.clock_offset_ns
      # - ebpf_net.otlp_grpc.metrics_sent
      # - ebpf_net.otlp_grpc.unknown_response_tags
      # - ebpf_net.collector_health
      # - ebpf_net.codetiming_sum_ns
      # - ebpf_net.otlp_grpc.failed_requests
      # - ebpf_net.rpc_queue_buf_utilization
      ### to enable all metrics (including metrics turned off by default): ###
      # - none
    enableMetrics: []
      ### The disableMetrics flag is evaluated first, and only then is the enableMetrics flag evaluated. ###
      ### to enable an entire metric category: ###
      # - tcp.all
      # - udp.all
      # - dns.all
      # - http.all
      # - ebpf_net.all
      ### to enable an individual metric: ###
      ### tcp ###
      # - tcp.bytes
      # - tcp.rtt.num_measurements
      # - tcp.active
      # - tcp.rtt.average
      # - tcp.packets
      # - tcp.retrans
      # - tcp.syn_timeouts
      # - tcp.new_sockets
      # - tcp.resets
      ### udp ###
      # - udp.bytes
      # - udp.packets
      # - udp.active
      # - udp.drops
      ### dns ###
      # - dns.client.duration.average
      # - dns.server.duration.average
      # - dns.active_sockets
      # - dns.responses
      # - dns.timeouts
      ### http ###
      # - http.client.duration.average
      # - http.server.duration.average
      # - http.active_sockets
      # - http.status_code
      ### ebpf_net ###
      # - ebpf_net.span_utilization_fraction
      # - ebpf_net.pipeline_metric_bytes_discarded
      # - ebpf_net.codetiming_min_ns
      # - ebpf_net.entrypoint_info
      # - ebpf_net.otlp_grpc.requests_sent
      # - ebpf_net.connections
      # - ebpf_net.rpc_queue_elem_utilization_fraction
      # - ebpf_net.disconnects
      # - ebpf_net.codetiming_avg_ns
      # - ebpf_net.client_handle_pool
      # - ebpf_net.otlp_grpc.successful_requests
      # - ebpf_net.span_utilization
      # - ebpf_net.up
      # - ebpf_net.rpc_queue_buf_utilization_fraction
      # - ebpf_net.collector_log_count
      # - ebpf_net.time_since_last_message_ns
      # - ebpf_net.bpf_log
      # - ebpf_net.codetiming_count
      # - ebpf_net.message
      # - ebpf_net.otlp_grpc.bytes_sent
      # - ebpf_net.pipeline_message_error
      # - ebpf_net.pipeline_metric_bytes_written
      # - ebpf_net.codetiming_max_ns
      # - ebpf_net.span_utilization_max
      # - ebpf_net.client_handle_pool_fraction
      # - ebpf_net.span_utilization_fraction
      # - ebpf_net.rpc_latency_ns
      # - ebpf_net.agg_root_truncation
      # - ebpf_net.clock_offset_ns
      # - ebpf_net.otlp_grpc.metrics_sent
      # - ebpf_net.otlp_grpc.unknown_response_tags
      # - ebpf_net.collector_health
      # - ebpf_net.codetiming_sum_ns
      # - ebpf_net.otlp_grpc.failed_requests
      # - ebpf_net.rpc_queue_buf_utilization
    telemetryPort: 7000
    statsPromPort: 7001
    image:
      name: splunk-network-explorer-reducer
      # tag: latest-v0.9
      # repository: quay.io/signalfx
    resources: {}
    nodeSelector: {}
    affinity: {}
    tolerations: []

  rbac:
    create: true

  podSecurityPolicy:
    enabled: true
    annotations: {}
      ## Specify pod annotations
      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
      ##
      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'


################################################################################
# Notice: Operator related features should be considered to have an alpha
# maturity level and be experimental. There may be breaking changes or Operator
# features may be replaced entirely with a better alternative in the future.
#
# The OpenTelemetry Operator running as a deployment with a replica count of 1.
# It auto-instruments applications to emit telemetry data.
# Related documentation: https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/auto-instrumentation-install.md
# Full list of Helm value configurations: https://artifacthub.io/packages/helm/opentelemetry-helm/opentelemetry-operator?modal=values
################################################################################

operator:
  enabled: false
  # For more details, refer to: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentation
  instrumentation:
    # Overrides for default instrumentation configurations can be specified here.
    # Source: https://github.com/signalfx/splunk-otel-collector-chart/blob/main/helm-charts/splunk-otel-collector/templates/operator/instrumentation.yaml
    # Rendered Default: https://github.com/signalfx/splunk-otel-collector-chart/blob/main/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml
    spec:
      # Optional "endpoint" parameter for exporting data to a specific target.
      # By default, the endpoint will be set to the agent if it's enabled. If the agent is not enabled, the endpoint
      # will default to the gateway, given it is enabled. If neither the agent nor the gateway is enabled, the endpoint
      # must be overridden here.
      # exporter:
        # endpoint: http://$(SPLUNK_OTEL_AGENT):4317
      # Optional "sampler" parameter for enabling trace sampling, see: https://opentelemetry.io/docs/concepts/sdk-configuration/general-sdk-configuration/#otel_traces_sampler
      # sampler:
        # type: traceidratio
        # argument: "0.95"
      # Optional "environment variable" parameters that can configure all instrumentation libraries.
      # If splunkObservability.profilingEnabled=true, environment variables enabling profiling will be added automatically.
      # env:
      # Auto-instrumentation Libraries (Start)
      # Below are configurations for the instrumentation libraries utilized in Auto-instrumentation.
      # Highlights:
      #   - Maturity varies among libraries (e.g., Java is more mature than Go). Check each library's stability here: https://opentelemetry.io/docs/instrumentation/#status-and-releases
      #   - Some libraries may be enabled by default. The current status can be checked here: https://github.com/open-telemetry/opentelemetry-operator#controlling-instrumentation-capabilities
      #   - Splunk provides best-effort support for native OpenTelemetry libraries, while offering full support for its own distributions.
      # Each library supports the following fields:
      #   - repository: Specifies the Docker image repository.
      #   - tag: Indicates the Docker image tag.
      #   - env: (Optional) Allows you to add any additional environment variables.
      java:
        repository: ghcr.io/signalfx/splunk-otel-java/splunk-otel-java
        tag: v1.28.0
      nodejs:
        repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs
        tag: 0.41.1
      go:
        repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-go
        tag: v0.2.2-alpha
      apache-httpd:
        repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd
        tag: 1.0.3
      python:
        repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python
        tag: 0.40b0
      dotnet:
        repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet
        tag: 1.0.0-rc.2
      # Auto-instrumentation Libraries (End)
  admissionWebhooks:
    certManager:
      # Annotate the certificate and issuer to ensure they are created after the cert-manager CRDs have been installed.
      certificateAnnotations:
        "helm.sh/hook": post-install,post-upgrade
        "helm.sh/hook-weight": "1"
      issuerAnnotations:
        "helm.sh/hook": post-install,post-upgrade
        "helm.sh/hook-weight": "1"

# The cert-manager is a CNCF application deployed as a subchart and used for supporting operators that require TLS certificates.
# Full list of Helm value configurations: https://artifacthub.io/packages/helm/cert-manager/cert-manager?modal=values
certmanager:
  enabled: false
  installCRDs: true


################################################################################
# Helm Chart Feature Gates.
# The following feature gates are used to enable/disable features in the Helm chart
# that are not yet ready for general availability.
# Options in this section are not guaranteed to be stable and may change at any time.
################################################################################

featureGates:
  # Use Light Prometheus Receiver for metrics collection from discovered Prometheus endpoints.
  # https://github.com/signalfx/splunk-otel-collector/tree/main/internal/receiver/lightprometheusreceiver
  # Light Prometheus Receiver is optimized for performance and reduced memory footprint.
  # On the other hand, it does not support all Prometheus configuration options.
  useLightPrometheusReceiver: false

Deploying it as-is fails with an error saying that the library/otelcontribcol image does not exist on Docker Hub.

Find a suitable image with the docker search otelcontribcol command and update the image in the Deployment accordingly.
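For reference, the search looks like this (results on Docker Hub change over time, so verify an image before using it; the manifest below uses holoinsight/otelcontribcol):

docker search otelcontribcol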

cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: otelcontribcol
  labels:
    app: otelcontribcol
spec:
  replicas: 1
  selector:
    matchLabels:
      app: otelcontribcol
  template:
    metadata:
      labels:
        app: otelcontribcol
    spec:
      serviceAccountName: otelcontribcol
      containers:
      - name: otelcontribcol
        image: holoinsight/otelcontribcol:latest # specify image
        args: ["--config", "/etc/config/config.yaml"]
        volumeMounts:
        - name: config
          mountPath: /etc/config
        imagePullPolicy: IfNotPresent
      volumes:
        - name: config
          configMap:
            name: otelcontribcol
EOF
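Once applied, the deployment can be verified with standard kubectl commands (a suggested check, not part of the original guide):

kubectl get pods -l app=otelcontribcol
kubectl logs deployment/otelcontribcol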

 

values.yaml
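The values.yaml below is applied with the splunk-otel-collector Helm chart, for example (the release name is arbitrary; the chart repository is the one documented by Splunk):

helm repo add splunk-otel-collector-chart https://signalfx.github.io/splunk-otel-collector-chart
helm repo update
helm install splunk-otel-collector -f values.yaml splunk-otel-collector-chart/splunk-otel-collector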

# Configurable parameters and default values for splunk-otel-collector.
# This is a YAML-formatted file.
# Declared variables will be passed into templates.

# nameOverride replaces the name of the chart, when this is used to construct
# Kubernetes object names.
nameOverride: ""
# fullnameOverride completely replaces the generated name.
fullnameOverride: ""

################################################################################
# clusterName is REQUIRED. It can be set to an arbitrary value that identifies
# your K8s cluster. The value will be associated with every trace, metric and
# log as "k8s.cluster.name" attribute.
################################################################################

clusterName: "test-eks-cluster"

################################################################################
# Splunk Cloud / Splunk Enterprise configuration.
################################################################################

# Specify `endpoint` and `token` in order to send data to Splunk Cloud or Splunk
# Enterprise.
splunkPlatform:
  # Required for Splunk Enterprise/Cloud. URL to a Splunk instance to send data
  # to. e.g. "http://X.X.X.X:8088/services/collector/event". Setting this parameter
  # enables Splunk Platform as a destination. Use the /services/collector/event
  # endpoint for proper extraction of fields.
  endpoint: "https://http-inputs-<STACK_NAME>.splunkcloud.com:443/services/collector"
  # Required for Splunk Enterprise/Cloud (if `endpoint` is specified). Splunk
  # HTTP Event Collector token.
  # Alternatively the token can be provided as a secret.
  # Refer to https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#provide-tokens-as-a-secret
  token: "xxxxxxxx"

  # Name of the Splunk event type index targeted. Required when ingesting logs to Splunk Platform.
  index: "dev_k8s_event"
  # Name of the Splunk metric type index targeted. Required when ingesting metrics to Splunk Platform.
  metricsIndex: "dev_k8s_metric"
  # Name of the Splunk event type index targeted. Required when ingesting traces to Splunk Platform.
  tracesIndex: ""
  # Optional. Default value for `source` field.
  source: "kubernetes"
  # Optional. Default value for `sourcetype` field. For container logs, it will
  # be container name.
  sourcetype: ""
  # Maximum HTTP connections to use simultaneously when sending data.
  maxConnections: 200
  # Whether to disable gzip compression over HTTP. Defaults to true.
  disableCompression: true
  # HTTP timeout when sending data. Defaults to 10s.
  timeout: 10s
  # Idle connection timeout. Defaults to 10s.
  idleConnTimeout: 10s
  # Whether to skip checking the certificate of the HEC endpoint when sending
  # data over HTTPS.
  insecureSkipVerify: false
  # The PEM-format CA certificate for this client.
  # Alternatively the clientCert, clientKey and caFile can be provided as a secret.
  # Refer to https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#provide-tokens-as-a-secret
  # NOTE: The content of the certificate itself should be used here, not the
  #       file path. The certificate will be stored as a secret in kubernetes.
  clientCert: ""
  # The private key for this client.
  # NOTE: The content of the key itself should be used here, not the file path.
  #       The key will be stored as a secret in kubernetes.
  clientKey: ""
  # The PEM-format CA certificate file.
  # NOTE: The content of the file itself should be used here, not the file path.
  #       The file will be stored as a secret in kubernetes.
  caFile: ""

  # Options to disable or enable particular telemetry data types that will be sent to
  # Splunk Platform. Only logs collection is enabled by default.
  logsEnabled: true
  # If you enable metrics collection, make sure that `metricsIndex` is provided as well.
  metricsEnabled: false
  # If you enable traces collection, make sure that `tracesIndex` is provided as well.
  tracesEnabled: false
  # Field name conventions to use. (Only for those who are migrating from Splunk Connect for Kubernetes helm chart)
  fieldNameConvention:
    # Boolean for renaming pod metadata fields to match to Splunk Connect for Kubernetes helm chart.
    renameFieldsSck: false
    # Boolean for keeping Otel convention fields after renaming it
    keepOtelConvention: true

  # Refer to https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md#configuration
  # for detailed examples
  retryOnFailure:
    enabled: true
    # Time to wait after the first failure before retrying; ignored if enabled is false
    initialInterval: 5s
    # The upper bound on backoff; ignored if enabled is false
    maxInterval: 30s
    # The maximum amount of time spent trying to send a batch; ignored if enabled is false
    maxElapsedTime: 300s

  sendingQueue:
    enabled: true
    # Number of consumers that dequeue batches; ignored if enabled is false
    numConsumers: 10
    # Maximum number of batches kept in memory before dropping; ignored if enabled is false
    # The user should calculate this as num_seconds * requests_per_second where:
    #   num_seconds is the number of seconds to buffer in case of a backend outage
    #   requests_per_second is the average number of requests per second.
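    # Worked example (illustrative): to buffer a ~100-second backend outage at
    # an average of 50 requests per second, set queueSize = 100 * 50 = 5000.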
    queueSize: 5000

    # This option enables the persistent queue to store data on the disk instead of memory before sending it to the backend.
    # It allows setting higher queue limits and preserving the data across restarts of the collector container.
    # NOTE: The File Storage extension will persist state to the node's local file system.
    # While using the persistent queue it is advised to increase memory limit for agent (agent.resources.limits.memory)
    # to 1Gi.
    # Refer to: https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#data-persistence
    persistentQueue:
      # Specifies whether to persist log/metric/trace data.
      enabled: false
      storagePath: "/var/addon/splunk/exporter_queue"

################################################################################
# Splunk Observability configuration
################################################################################

# Specify `realm` and `accessToken` to send telemetry data to Splunk
# Observability Cloud.
splunkObservability:
  # Required for Splunk Observability. Splunk Observability realm to send
  # telemetry data to. Setting this parameter enables Splunk Observability as a
  # destination.
  realm: "us1"
  # Required for Splunk Observability (if `realm` is specified). Splunk
  # Observability org access token.
  # Alternatively the accessToken can be provided as a secret.
  # Refer to https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#provide-tokens-as-a-secret
  accessToken: "xxxxxxxx"

  # Optional. Splunk Observability ingest URL, default:
  # "https://ingest.<realm>.signalfx.com".
  ingestUrl: "https://ingest.us1.signalfx.com"
  # Optional. Splunk Observability API URL, default:
  # "https://api.<realm>.signalfx.com".
  apiUrl: "https://api.us1.signalfx.com"

  # Options to disable or enable particular telemetry data types.
  metricsEnabled: true
  tracesEnabled: true
  logsEnabled: false

  # Option to send Kubernetes events to Splunk Observability Infrastructure Monitoring as data events:
  # https://docs.splunk.com/Observability/alerts-detectors-notifications/view-data-events.html
  # To send Kubernetes events to Splunk Observability Log Observer, configure clusterReceiver.k8sObjects
  # and set splunkObservability.logsEnabled to true.
  infrastructureMonitoringEventsEnabled: false

  # This option just enables the shared pipeline for logs and profiling data.
  # There is no active collection of profiling data.
  # Instrumentation libraries must be configured to send it to the collector.
  # If you don't use AlwaysOn Profiling for Splunk APM, you can disable it.
  profilingEnabled: false

################################################################################
# Logs collection engine:
# - `fluentd`: deploy a fluentd sidecar that will collect logs and send them to
#   otel-collector agent for further processing.
# - `otel`: utilize native OpenTelemetry log collection.
#
# `fluentd` will be deprecated soon, so it's recommended to use `otel` instead.
################################################################################

logsEngine: otel

################################################################################
# Cloud provider, if any, the collector is running on. Leave empty for none/other.
# - "aws" (Amazon Web Services)
# - "gcp" (Google Cloud Platform)
# - "azure" (Microsoft Azure)
################################################################################

cloudProvider: "aws"

################################################################################
# Kubernetes distribution being run. Leave empty for other.
# - "aks" (Azure Kubernetes Service)
# - "eks" (Amazon Elastic Kubernetes Service)
# - "eks/fargate" (Amazon Elastic Kubernetes Service with Fargate profiles )
# - "gke" (Google Kubernetes Engine / Standard mode)
# - "gke/autopilot" (Google Kubernetes Engine / Autopilot mode)
# - "openshift" (RedHat OpenShift)
################################################################################

distribution: "eks"

################################################################################
# Optional "environment" parameter that will be added to all the telemetry
# data (traces/logs/metrics) as an attribute. It will allow Splunk Observability
# users to investigate data coming from different sources separately.
# See: https://docs.splunk.com/observability/apm/set-up-apm/environments.html#setting-the-deployment-environment-span-tag
################################################################################

# environment: production

################################################################################
# Optional: Automatic detection of additional metric sources.
# Set autodetect.prometheus=true if you want the otel-collector agent to scrape
# prometheus metrics from pods that have prometheus-style annotations like
# "prometheus.io/scrape".
# Set autodetect.istio=true in istio environment.
################################################################################

autodetect:
  prometheus: false
  # This option is recommended for istio environments. It does the following things:
  # - Enables scraping istio control plane metrics from Prometheus endpoints.
  # - Adds a `service.name` resource attribute to logs with the same value as istio generates for
  #   traces, to enable correlation between logs and traces using this attribute.
  istio: false
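  # Example (illustrative) of the prometheus-style pod annotations that
  # autodetect.prometheus=true reacts to:
  #   prometheus.io/scrape: "true"
  #   prometheus.io/port: "9090"
  #   prometheus.io/path: "/metrics"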

################################################################################
# Optional: Configuration for additional metadata that will be added to all the
# telemetry as extra attributes.
# IMPORTANT: Additional attributes configured with `fromLabels` and
# `fromAttributes` options are only applied to traces and logs. Pod labels are
# always sent to Splunk Observability (if enabled) as metric properties.
################################################################################

extraAttributes:

  # Labels that will be collected from k8s pods (or namespaces) (in case they are set)
  # and added as extra attributes to the telemetry in the following format:
  # k8s.<pod|namespace>.labels.<label_name>: <label_value>
  #  For example, if you want to collect "my_key" label from your namespaces, you could use the following:
  #  fromLabels:
  #    - key: my_key
  #      from: namespace
  #
  #  If you want to change the default attribute name `k8s.pod.labels.<label_name>`, you could do that using a `tag_name` field:
  #  fromLabels:
  #    - key: my_key
  #      tag_name: my_tag
  #      from: pod
  #
  #  `key_regex` field can be used to get a specific set of labels that match a regex.
  #  If `key_regex` is used, the `key` field accepts regexp matching groups.
  #  The following example will fetch all the pod labels and propagate them to the attributes as is,
  #  without "k8s.pod.labels." prefix. "$" from the matching group must be escaped as "$$".
  #  fromLabels:
  #    - key_regex: (.*)
  #      from: pod
  #      tag_name: "$$1"
  fromLabels:
    - key: app
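  #  With the single `app` key above, each pod's `app` label is attached to
  #  telemetry as `k8s.pod.labels.app: <label_value>`, per the format described above.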

  # Annotations that will be collected from k8s pods (or namespaces) (in case they are set)
  # and added as extra attributes to the telemetry in the following format:
  # k8s.<pod|namespace>.annotations.<annotation_name>: <annotation_value>
  # fromAnnotations uses the same extraction rules as the fromLabels option, so refer to the fromLabels examples above.
  fromAnnotations: []

  # List of hardcoded key/value pairs that will be added as attributes to
  # all the telemetry.
  custom: []
    # - name: "account_id"
    #   value: "1234567890"

################################################################################
# OPTIONAL CONFIGURATIONS OF PARTICULAR O11Y COLLECTOR COMPONENTS
################################################################################

################################################################################
# OpenTelemetry collector running as a daemonset agent on every node.
# It collects metrics and traces and sends them to the SignalFx backend.
################################################################################

agent:
  enabled: true

  # Metric collection from k8s control plane components.
  # For control plane configuration details see: docs/advanced-configuration.md#control-plane-metrics
  controlPlaneMetrics:
    apiserver:
      # Specifies whether to collect apiserver metrics.
      enabled: true
    controllerManager:
      # Specifies whether to collect controller manager metrics.
      enabled: true
    coredns:
      # Specifies whether to collect coredns metrics.
      enabled: true
    etcd:
      # Specifies whether to collect etcd metrics.
      # For details on setting up etcd metrics, see: docs/advanced-configuration.md#setting-up-etcd-metrics
      enabled: false
      secret:
        # The name of the secret the helm chart will create (if name is empty the default name is used) or the name
        # of a secret that the user created (empty names are not valid for user created secrets).
        name: ""
        # Option for creating a new secret or using an existing one.
        # When secret.create=true, a new kubernetes secret will be created by the helm chart that will contain the
        # values from clientCert, clientKey, and caFile.
        # When secret.create=false, the user must set secret.name to a name of a k8s secret the user created.
        create: false
        # Used when secret.create=true. The PEM-format CA certificate for the etcd client.
        # NOTE: The content of the certificate itself should be used here, not the
        #       file path. The certificate will be stored as a secret in kubernetes.
        clientCert: ""
        # Used when secret.create=true. The private key for the etcd client.
        # NOTE: The content of the key itself should be used here, not the file path.
        #       The key will be stored as a secret in kubernetes.
        clientKey: ""
        # Optional. Used when secret.create=true and skipVerify=false. The PEM-format CA certificate file.
        # NOTE: The content of the file itself should be used here, not the file path.
        #       The file will be stored as a secret in kubernetes.
        caFile: ""
      # Specifies whether the etcd's TLS cert will be verified. If set to false, a CA certificate must be made
      # available as part of the etcd secret to verify the TLS cert with.
      skipVerify: true
    proxy:
      # Specifies whether to collect proxy metrics.
      enabled: true
    scheduler:
      # Specifies whether to collect scheduler metrics.
      enabled: true

  # The ports to be exposed by the agent to the host.
  # Make sure that only necessary ports are exposed; the <hostIP, hostPort, protocol> combination must
  # be unique across all the nodes in the k8s cluster. Any port can be disabled;
  # for example, to disable zipkin ports set `agent.ports.zipkin: null`.
  ports:
    otlp:
      containerPort: 4317
      hostPort: 4317
      protocol: TCP
      enabled_for: [traces, metrics, logs, profiling]
    otlp-http:
      containerPort: 4318
      protocol: TCP
      enabled_for: [metrics, traces, logs, profiling]
    otlp-http-old:
      containerPort: 55681
      protocol: TCP
      enabled_for: [metrics, traces, logs]
    sfx-forwarder:
      containerPort: 9080
      hostPort: 9080
      protocol: TCP
      enabled_for: [traces]
    zipkin:
      containerPort: 9411
      hostPort: 9411
      protocol: TCP
      enabled_for: [traces]
    jaeger-thrift:
      containerPort: 14268
      hostPort: 14268
      protocol: TCP
      enabled_for: [traces]
    jaeger-grpc:
      containerPort: 14250
      hostPort: 14250
      protocol: TCP
      enabled_for: [traces]
    fluentforward:
      containerPort: 8006
      hostPort: 8006
      protocol: TCP
      enabled_for: [logs]
    signalfx:
      containerPort: 9943
      hostPort: 9943
      protocol: TCP
      enabled_for: [metrics]

  resources:
    limits:
      cpu: 200m
      # This value is being used as a source for default memory_limiter processor configurations
      memory: 500Mi

  # To collect container logs and journald logs, the agent runs as the root user.
  # To run it as a non-root user, uncomment the `securityContext` options below.
  # Setting runAsUser and runAsGroup to a non-root user enables an init container that patches group
  # permissions of container logs directories on the host filesystem to make logs readable by this non-root user.

  securityContext: {}
  #   runAsUser: 20000
  #   runAsGroup: 20000

  # Specifies DaemonSet update strategy.
  # Possible values: "OnDelete" and "RollingUpdate".
  updateStrategy: RollingUpdate

  # Specifies the maximum number of pods that can be unavailable during the update process.
  # Applicable only when updateStrategy is set to "RollingUpdate".
  # Can be an absolute number or a percentage. The default is 1.
  maxUnavailable: 1

  service:
    # Create a service for the agents with a local internalTrafficPolicy
    # so that agent pods can be discovered via DNS, etc.
    enabled: false

  # OTel agent annotations
  annotations: {}
  podAnnotations: {}

  # OTel agent extra pod labels
  podLabels: {}

  # Extra environment variables to be set in the OTel agent container
  extraEnvs: []

  # Extra volumes to be mounted to the agent daemonset.
  # The volumes will be available for both OTel agent and fluentd containers.
  extraVolumes: []
  extraVolumeMounts: []

  # Enable or disable features of the agent.
  featureGates: ""

  # The OpenTelemetry Collector configuration for the otel-agent daemonset can be overridden in this field.
  # Default configuration is defined in templates/config/_otel-agent.tpl.
  # Any additional fields will be merged into the defaults,
  # and existing fields can be disabled by setting them to a null value.
  config: {}
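  # Example (illustrative) override: tune the batch processor and disable one
  # of the default receivers by setting it to null.
  # config:
  #   processors:
  #     batch:
  #       send_batch_size: 512
  #   receivers:
  #     jaeger: null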

################################################################################
# OpenTelemetry Kubernetes cluster receiver
# This is an extra 1-replica deployment of the OpenTelemetry collector used
# specifically for collecting metrics from the Kubernetes API.
################################################################################

# Cluster receiver collects cluster level metrics from the Kubernetes API.
# It has to be running on one pod, so it uses its own dedicated deployment with 1 replica.

clusterReceiver:
  enabled: true

  # Needs to be adjusted based on the size of the monitored cluster
  resources:
    limits:
      cpu: 200m
      memory: 500Mi

  # Scheduling configurations
  nodeSelector: {}
  tolerations: []
  affinity: {}

  # Pod configurations
  securityContext: {}
  terminationGracePeriodSeconds: 600
  priorityClassName: ""

  # k8s cluster receiver collector annotations
  annotations: {}
  podAnnotations: {}

  # This flag enables Kubernetes events collection using OpenTelemetry Kubernetes Events Receiver
  # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8seventsreceiver
  # This option requires `logsEnabled` to be set to `true` for either `splunkObservability` or `splunkPlatform`
  # depending on where you want to send the events. Otherwise this option will not have any effect.
  # The receiver is currently in an alpha state, which means that the events format might change over time.
  # Once the receiver is stabilized, it'll be enabled by default in this helm chart.
  eventsEnabled: true

  # Kubernetes objects collection using OpenTelemetry Kubernetes Object Receiver
  # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8sobjectsreceiver
  # This option requires `logsEnabled` to be set to `true` for either `splunkObservability` or `splunkPlatform`
  # depending on where you want to send the events. Otherwise, this option will not have any effect.
  # The receiver is currently in an alpha state, which means that the events format might change over time.
  # Once the receiver is stabilized, it'll be enabled by default in this helm chart.

  #
  # == Schema ==
  # ```
  # k8sObjects:
  #   - <objectDefinition>
  # ```
  # Each `objectDefinition` has the following fields:
  # * mode:
  #     define in which way it collects this type of object, either "pull" or "watch".
  #     - "pull" mode will read all objects of this type use the list API at an interval. Default mode.
  #     - "watch" mode will setup a long connection using the watch API to just get updates.
  # * name: [REQUIRED]
  #     name of the object, e.g. `pods`, `namespaces`.
  # * namespace:
  #     only collects objects from the specified namespace; by default all namespaces are collected
  # * labelSelector:
  #     select objects by label(s)
  # * fieldSelector:
  #     select objects by field(s)
  # * interval:
  #     the interval at which the object is pulled, default 60 seconds.
  #     Only useful for "pull" mode.
  #
  #
  # == Example ==
  # ```
  #  k8sObjects:
  #    - name: pods
  #      mode: pull
  #      label_selector: environment in (production),tier in (frontend)
  #      field_selector: status.phase=Running
  #      interval: 15m
  #    - name: events
  #      mode: watch
  #      group: events.k8s.io
  #      namespaces: [default]
  # ```
  #
  # The configuration format in details is described here:
  # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8sobjectsreceiver
  k8sObjects:
  #  - name: pods
  #    mode: pull
  #    label_selector: environment in (production),tier in (frontend)
  #    field_selector: status.phase=Running
  #    interval: 60s
    - name: events
      mode: watch
      group: events.k8s.io
      namespaces: [default]
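  # Note: with the entry above, only `events.k8s.io` events from the `default`
  # namespace are watched. Since `splunkPlatform.logsEnabled` is true, these
  # events are ingested into the `dev_k8s_event` index configured earlier.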

  # k8s cluster receiver extra pod labels
  podLabels: {}

  # Extra environment variables to be set in the OTel Cluster Receiver container
  extraEnvs: []

  # Extra volumes to be mounted to the k8s cluster receiver container.
  extraVolumes: []
  extraVolumeMounts: []

  # Enable or disable features of the cluster receiver.
  featureGates: ""

  # The OpenTelemetry Collector configuration for the K8s Cluster Receiver deployment can be overridden in this field.
  # Default configuration is defined in templates/config/_otel-k8s-cluster-receiver-config.tpl.
  # Any additional fields will be merged into the defaults,
  # and existing fields can be disabled by setting them to a null value.
  config: {}

#################################################################
# Native OpenTelemetry logs collection
# Enabled by setting "logsEngine: otel" (as configured above); otherwise the
# fluentd engine is used.
# Receiver Documentation: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver
# OpenTelemetry Logging Documentation: https://opentelemetry.io/docs/specs/otel/logs
#################################################################

logsCollection:

  # Container logs collection
  containers:
    enabled: true
    # Container runtime. One of `docker`, `cri-o`, or `containerd`
    # Automatically discovered if not set.
    containerRuntime: ""
    # Paths of logfiles to exclude. The object type is an array:
    # i.e. to exclude `kube-system` namespace,
    # excludePaths: ["/var/log/pods/kube-system_*/*/*.log"]
    excludePaths:
    - "/var/log/pods/amazon-cloudwatch_*/*/*.log"
    - "/var/log/pods/argo-rollouts_*/*/*.log"
    - "/var/log/pods/argocd_*/*/*.log"
    - "/var/log/pods/awx_*/*/*.log"
    - "/var/log/pods/concurency-check_*/*/*.log"
    - "/var/log/pods/default_*/*/*.log"
    - "/var/log/pods/goldilocks_*/*/*.log"
    - "/var/log/pods/datadog_*/*/*.log"
    - "/var/log/pods/jenkins_*/*/*.log"
    - "/var/log/pods/kube-node-lease_*/*/*.log"
    - "/var/log/pods/kube-public_*/*/*.log"
    - "/var/log/pods/istio-system_*/*/*.log"
    - "/var/log/pods/kube-system_*/*/*.log"
    - "/var/log/pods/logging_*/*/*.log"
    - "/var/log/pods/prometheus_*/*/*.log"
    - "/var/log/pods/splunk_*/*/*.log"
    - "/var/log/pods/ui-client_*/*/*.log"
    - "/var/log/pods/whatap-monitoring_*/*/*.log"
    - "/var/log/pods/whatap-agent_*/*/*.log"    
    # Boolean for excluding the agent's own logs from collection
    excludeAgentLogs: true
    # Extra operators for container logs.
    # https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/stanza/docs/operators/README.md
    extraOperators: []

    # Multiline logs processing configuration. Multiline logs written by containers to stdout
    # are usually broken down into several one-line logs and can be reconstructed with a regex
    # expression that matches the first line of each logs batch. The following operator is
    # utilized for this purpose:
    # https://github.com/open-telemetry/opentelemetry-log-collection/blob/main/docs/operators/recombine.md
    # At the time a multiline log is reconstructed, the following information is available to
    # identify the source of the logs: namespace, pod and container names. At least one source
    # identifier has to be specified for each multiline config.
    # The following example shows how to set up multiline log processing for logs having subsequent
    # log lines written with an offset. Let's say a k8s deployment called "buttercup-app" is
    # scheduled to run in the "default" namespace with a java container called "server", and the
    # container produces the following log example:
    #  .........
    #  Exception in thread "main" java.lang.NumberFormatException: For input string: "3.1415"
    #      at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)
    #      at java.lang.Integer.parseInt(Integer.java:580)
    #      at ExampleCli.parseNumericArgument(ExampleCli.java:47)
    #      at ExampleCli.parseCliOptions(ExampleCli.java:27)
    #      at ExampleCli.main(ExampleCli.java:11)
    #  .........
    # The following sample configuration will handle multiline logs from that specific container:
    # multilineConfigs:
    #   - namespaceName:
    #       value: default
    #     podName:
    #       value: buttercup-app-.*
    #       useRegexp: true
    #     containerName:
    #       value: server
    #     firstEntryRegex: ^[^\s].*
    multilineConfigs: []
    # Set the useSplunkIncludeAnnotation flag to `true` to collect logs only from pods with the
    # `splunk.com/include: true` annotation; all other logs will be ignored.
    useSplunkIncludeAnnotation: false
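    # Example (illustrative) pod opt-in when useSplunkIncludeAnnotation is true:
    #   metadata:
    #     annotations:
    #       splunk.com/include: "true"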
    # maxRecombineLogSize sets the maximum size in bytes of a message recombined from cri-o, containerd and docker log entries.
    # Set to 0 to remove any size limit.
    maxRecombineLogSize: 1048576

  # Configuration for collecting journald logs using the otel collector
  journald:
    enabled: false
    # Please update directory path for journald if it's different from below default value "/var/log/journal"
    directory: /var/log/journal
    # List of service units to collect journald logs for and configuration for each.
    units:
      - name: kubelet
        priority: info
      - name: docker
        priority: info
      - name: containerd
        priority: info
    # Route journald logs to their own Splunk index by specifying the index value below; otherwise leave it blank. Please make sure the index exists in Splunk and is configured to receive HEC traffic. Not applicable to Splunk Observability.
    index: ""

  checkpointPath: "/var/addon/splunk/otel_pos"

  # Files on k8s nodes to tail.
  # Make sure to configure volume mounts properly at `agent.extraVolumes` and `agent.extraVolumeMounts`.
  extraFileLogs: {}
  # Sample configuration to collect Audit logs. Please note hostPath can vary depending on the audit-policy.yaml configuration.
  # extraFileLogs:
  #   filelog/audit-log:
  #     include: [/var/log/kubernetes/apiserver/audit.log]
  #     start_at: beginning
  #     include_file_path: true
  #     include_file_name: false
  #     resource:
  #       com.splunk.source: /var/log/kubernetes/apiserver/audit.log
  #       host.name: 'EXPR(env("K8S_NODE_NAME"))'
  #       com.splunk.sourcetype: kube:apiserver-audit

################################################################################
# Fluentd sidecar configuration for logs collection.
# Fluentd is being deprecated in favor of the native otel logs collection
# (see `logsEngine` above), so this section applies only when logsEngine is set to `fluentd`.
################################################################################

fluentd:
  resources:
    limits:
      cpu: 500m
      memory: 500Mi
    requests:
      cpu: 100m
      memory: 200Mi

  securityContext:
    runAsUser: 0

  # Extra environment variables to be set in the FluentD container
  extraEnvs: []

  config:
    # Configurations for container logs
    containers:
      # Path to root directory of container logs
      path: /var/log
      # Final volume destination of container log symlinks
      pathDest: /var/lib/docker/containers
      # Log format type, "json" or "cri".
      # If omitted (default), the value is detected automatically based on container runtime.
      # "json" is set if docker runtime detected, otherwise it defaults to "cri".
      logFormatType: ""
      # Specify the log format for "cri" logFormatType
      # It can be "%Y-%m-%dT%H:%M:%S.%N%:z" for OpenShift and "%Y-%m-%dT%H:%M:%S.%NZ" for IBM IKS.
      criTimeFormat: "%Y-%m-%dT%H:%M:%S.%N%:z"

    # Directory from which to read journald logs (docker daemon logs, kubelet logs, and any other specified service logs)
    journalLogPath: /run/log/journal

    # Controls the output buffer for the fluentd daemonset
    # Note that, for the memory buffer, if `resources.limits.memory` is set,
    # the total buffer size should not be bigger than the memory limit; it should also
    # account for the base memory usage of fluentd itself.
    # All buffer parameters (except Argument) defined in
    # https://docs.fluentd.org/v1.0/articles/buffer-section#parameters
    # can be configured here.
    buffer:
      "@type": memory
      total_limit_size: 600m
      chunk_limit_size: 1m
      chunk_limit_records: 100000
      flush_interval: 5s
      flush_thread_count: 1
      overflow_action: block
      retry_max_times: 3
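    # Sizing check (illustrative): with fluentd's resources.limits.memory set to
    # 500Mi above, a total_limit_size of 600m would exceed the memory limit, so
    # reduce it (e.g. to 400m) if the fluentd engine is actually enabled.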

    # logLevel is to set log level of the Splunk log collector.
    # Available values are: trace, debug, info, warn, error
    logLevel: info

    # path of logfiles, default /var/log/containers/*.log
    path: /var/log/containers/*.log
    # Paths of logfiles to exclude. The object type is an array, as per the fluentd specification:
    # https://docs.fluentd.org/input/tail#exclude_path
    excludePath: []
    #  - /var/log/containers/kube-svc-redirect*.log
    #  - /var/log/containers/tiller*.log

    # Prefix for pos_file tail source parameter
    # Can be used if you want to run multiple instances of fluentd on the same host
    # https://docs.fluentd.org/input/tail#pos_file-highly-recommended
    posFilePrefix: /var/log/splunk-fluentd

    # `customFilters` defines the custom filters to be used.
    # This section can be used to define custom filters using plugins like https://github.com/splunk/fluent-plugin-jq
    # It's also possible to use other filters like https://www.fluentd.org/plugins#filter
    #
    # The scheme to define a custom filter is:
    #
    # ```
    # <name>:
    #   tag: <fluentd tag for the filter>
    #   type: <fluentd filter type>
    #   body: <definition of the fluentd filter>
    # ```
    #
    # = fluentd tag for the filter =
    # This is the fluentd tag for the record
    #
    # = fluentd filter type =
    # This is the fluentd filter that the user wants to use for record manipulation.
    #
    # = definition of the fluentd filter =
    # This defines the body/logic for using the filter for record manipulation.
    #
    # For example, if you want to define a filter which sets the cluster_name field to "my_awesome_cluster", you would use the following filter:
    # <filter tail.containers.**>
    #  @type jq_transformer
    #  jq '.record.cluster_name = "my_awesome_cluster" | .record'
    # </filter>
    # This can be defined in the customFilters section as follows:
    # ```
    # customFilters:
    #   NamespaceSourcetypeFilter:
    #     tag: tail.containers.**
    #     type: jq_transformer
    #     body: jq '.record.cluster_name = "my_awesome_cluster" | .record'
    # ```
    customFilters: {}

    # `logs` defines the source of logs, multiline support, and their sourcetypes.
    #
    # The scheme to define a log is:
    #
    # ```
    # <name>:
    #   from:
    #     <source>
    #   timestampExtraction:
    #     regexp: "<regexp_to_extract_timestamp_from_log>"
    #     format: "<format_of_the_timestamp>"
    #   multiline:
    #     firstline: "<regexp_to_detect_firstline_of_multiline>"
    #     flushInterval: 5s
    #   sourcetype: "<sourcetype_of_logs>"
    # ```
    #
    # = <source> =
    # It supports 3 kinds of sources: journald, file, and container.
    # For `journald` logs, `unit` is required for filtering using _SYSTEMD_UNIT, example:
    # ```
    # docker:
    #   from:
    #     journald:
    #       unit: docker.service
    # ```
    #
    # For `file` logs, `path` is required to specify where the log files are. Log files are expected in `/var/log`, for example:
    # ```
    # docker:
    #   from:
    #     file:
    #       path: /var/log/docker.log
    # ```
    #
    # For `container` logs, the `pod` field is required. It represents part of
    # the pod name and can be the name of a deployment or replica set. Use "*" to
    # apply the configuration to all pods. The optional `container` value can be
    # used to apply the configuration to a particular container.
    # ```
    # kube-apiserver:
    #   from:
    #     pod: kube-apiserver
    #
    # etcd:
    #   from:
    #     pod: etcd-server
    #     container: etcd-container
    # ```
    #
    # = timestamp =
    # `timestampExtraction` defines how to extract timestamp from logs. This *only* works for `file` source.
    # To use `timestampExtraction` you need to define both:
    # - `regexp`: the Regular Expression used to find the timestamp from a log entry.
    #             The timestamp part must be in a `time` named group. E.g.
    #             (?<time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})
    # - `format`: a format string that defines how to parse the timestamp, e.g. "%Y-%m-%d %H:%M:%S".
    #             More details can be found at: http://ruby-doc.org/stdlib-2.5.0/libdoc/time/rdoc/Time.html#method-c-strptime
    #
    # = multiline =
    # `multiline` options provide basic multiline support. Two options:
    # - `firstline`: a Regular Expression used to detect the first line of a multiline log.
    # - `flushInterval`: The interval between data flushes, default value: 5s.
    #
    # = sourcetype =
    # sourcetype of each kind of log can be defined using the `sourcetype` field.
    # If `sourcetype` is not defined, `name` will be used.
    #
    # ---
    # Here we have some default timestampExtraction and multiline settings for kubernetes components.
    # So, usually you just need to redefine the source of those components if necessary.
    logs:
      docker:
        from:
          journald:
            unit: docker.service
        timestampExtraction:
          regexp: time="(?<time>\d{4}-\d{2}-\d{2}T[0-2]\d:[0-5]\d:[0-5]\d.\d{9}Z)"
          format: "%Y-%m-%dT%H:%M:%S.%NZ"
        sourcetype: kube:docker
      kubelet: &glog
        from:
          journald:
            unit: kubelet.service
        timestampExtraction:
          regexp: \w(?<time>[0-1]\d[0-3]\d [^\s]*)
          format: "%m%d %H:%M:%S.%N"
        multiline:
          firstline: /^\w[0-1]\d[0-3]\d/
        sourcetype: kube:kubelet
      etcd:
        from:
          pod: etcd-server
          container: etcd-container
        timestampExtraction:
          regexp: (?<time>\d{4}-\d{2}-\d{2} [0-2]\d:[0-5]\d:[0-5]\d\.\d{6})
          format: "%Y-%m-%d %H:%M:%S.%N"
      etcd-minikube:
        from:
          pod: etcd-minikube
          container: etcd
        timestampExtraction:
          regexp: (?<time>\d{4}-\d{2}-\d{2} [0-2]\d:[0-5]\d:[0-5]\d\.\d{6})
          format: "%Y-%m-%d %H:%M:%S.%N"
      etcd-events:
        from:
          pod: etcd-server-events
          container: etcd-container
        timestampExtraction:
          regexp: (?<time>\d{4}-[0-1]\d-[0-3]\d [0-2]\d:[0-5]\d:[0-5]\d\.\d{6})
          format: "%Y-%m-%d %H:%M:%S.%N"
      kube-apiserver:
        <<: *glog
        from:
          pod: kube-apiserver
        sourcetype: kube:kube-apiserver
      kube-scheduler:
        <<: *glog
        from:
          pod: kube-scheduler
        sourcetype: kube:kube-scheduler
      kube-controller-manager:
        <<: *glog
        from:
          pod: kube-controller-manager
        sourcetype: kube:kube-controller-manager
      kube-proxy:
        <<: *glog
        from:
          pod: kube-proxy
        sourcetype: kube:kube-proxy
      kubedns:
        <<: *glog
        from:
          pod: kube-dns
        sourcetype: kube:kubedns
      dnsmasq:
        <<: *glog
        from:
          pod: kube-dns
        sourcetype: kube:dnsmasq
      dns-sidecar:
        <<: *glog
        from:
          pod: kube-dns
          container: sidecar
        sourcetype: kube:kubedns-sidecar
      dns-controller:
        <<: *glog
        from:
          pod: dns-controller
        sourcetype: kube:dns-controller
      kube-dns-autoscaler:
        <<: *glog
        from:
          pod: kube-dns-autoscaler
          container: autoscaler
        sourcetype: kube:kube-dns-autoscaler
      kube-audit:
        from:
          file:
            path: /var/log/kube-apiserver-audit.log
        timestampExtraction:
          format: "%Y-%m-%dT%H:%M:%SZ"
        sourcetype: kube:apiserver-audit

################################################################################
# Docker image configuration
################################################################################

image:
  # Secrets to attach to the respective serviceaccount to pull docker images
  imagePullSecrets: []

  fluentd:
    # The registry and name of the fluentd image to pull
    repository: splunk/fluentd-hec
    # The tag of the fluentd image to pull
    tag: 1.3.3
    # The policy that specifies when the user wants the fluentd images to be pulled
    pullPolicy: IfNotPresent

  otelcol:
    # The registry and name of the opentelemetry collector image to pull
    repository: quay.io/signalfx/splunk-otel-collector
    # The tag of the Splunk OTel Collector image, default value is the chart appVersion
    tag: ""
    # The policy that specifies when the user wants the opentelemetry collector images to be pulled
    pullPolicy: IfNotPresent

  # Image to be used by init container that patches log directories on the host, so the collector can read from them as a non-root user.
  # Effective only if `agent.securityContext.runAsUser` and `agent.securityContext.runAsGroup` are set to non-zero values.
  initPatchLogDirs:
    # The registry and name of the Universal Base Image 9 image to pull
    repository: registry.access.redhat.com/ubi9/ubi
    # The tag of the Universal Base Image 9, default value is latest
    tag: ""
    # The policy that specifies when the user wants the Universal Base images to be pulled
    pullPolicy: IfNotPresent


################################################################################
# Extra system configuration
################################################################################

## Limits how many pods may be unavailable due to voluntary disruptions.
## https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
  # Minimum number of pods (as a number or percentage) that must remain available.
  # minAvailable:
  # Maximum number of pods (as a number or percentage) that can be unavailable.
  # maxUnavailable:

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

  # Service account annotations
  annotations: {}

rbac:
  # Create or use existing RBAC resources
  create: true
  # Specifies additional rules that will be added to the clusterRole.
  customRules: []

# Create or use an existing secret. If the name is empty, the default name is used.
secret:
  create: true
  name: ""
  # Specifies whether secret provided by user should be validated.
  validateSecret: true

# These default tolerations allow the daemonset to be deployed on control-plane
# nodes, so that we can also collect logs and metrics from those nodes.
tolerations:
  - key: node-role.kubernetes.io/master
    effect: NoSchedule
  - key: node-role.kubernetes.io/control-plane
    effect: NoSchedule

# Defines which nodes should be selected to deploy the o11y collector daemonset.
nodeSelector: {}
terminationGracePeriodSeconds: 600

# Defines node affinity to restrict deployment of the o11y collector daemonset.
affinity: {}

# Defines priorityClassName to assign a priority class to pods.
priorityClassName: ""

# This tells the kubelet to wait for the given number of seconds before performing the first probe.
# This is required if you are using Windows worker nodes.
# A 60-second window is recommended, but it depends on the cluster specification.
readinessProbe:
  initialDelaySeconds: 0
livenessProbe:
  initialDelaySeconds: 0
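# Example (illustrative) for Windows worker nodes, per the note above:
# readinessProbe:
#   initialDelaySeconds: 60
# livenessProbe:
#   initialDelaySeconds: 60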

# Specifies whether this applies to a k8s cluster with Windows worker nodes.
isWindows: false

# Whether to automatically create the OpenShift SCC or to create it manually.
# NOTE: This config will only be used when distribution=openshift
securityContextConstraints:
  create: true

# OpenShift SecurityContextConstraints can be overridden in this field.
# These fields will be merged into the default config that can be found at
# https://github.com/signalfx/splunk-otel-collector-chart/blob/main/helm-charts/splunk-otel-collector/templates/securityContextConstraints.yaml
# NOTE: This config will only be used when distribution=openshift
securityContextConstraintsOverwrite: {}

################################################################################
# OpenTelemetry "collector" k8s deployment configuration.
# This is an additional deployment of the OpenTelemetry collector that can be used
# to pass traces through it, performing k8s metadata enrichment and batching.
# Another use case is to point tracing instrumentation libraries directly to
# the collector endpoint instead of local agents. The collector running in the
# passthrough mode is recommended for large k8s clusters, disabled by default.
################################################################################

gateway:
  # Defines if collector deployment is enabled
  # Recommended for large k8s clusters, disabled by default.
  enabled: false

  # Number of collector replicas
  replicaCount: 3

  # The ports exposed by the collector container.
  # Any port can be disabled by setting to null.
  # Any changes should be aligned with service.ports configuration below.
  ports:
    otlp:
      containerPort: 4317
      protocol: TCP
      enabled_for: [metrics, traces, logs]
    otlp-http:
      containerPort: 4318
      protocol: TCP
      enabled_for: [metrics, traces, logs]
    otlp-http-old:
      containerPort: 55681
      protocol: TCP
      enabled_for: [metrics, traces, logs]
    jaeger-thrift:
      containerPort: 14268
      protocol: TCP
      enabled_for: [traces]
    jaeger-grpc:
      containerPort: 14250
      protocol: TCP
      enabled_for: [traces]
    zipkin:
      containerPort: 9411
      protocol: TCP
      enabled_for: [traces]
    signalfx:
      containerPort: 9943
      protocol: TCP
      # SignalFx metrics enabled in gateway for all telemetry types since there may be
      # bundled metrics.
      enabled_for: [metrics, traces, logs]
    http-forwarder:
      containerPort: 6060
      protocol: TCP
      # Enabled for all because SignalFx exporter will always send metadata updates when enabled.
      enabled_for: [metrics, traces, logs]

  resources:
    limits:
      cpu: 4
      # Memory limit value is used as a source for default memory_limiter configuration
      memory: 8Gi

  # Scheduling configurations
  nodeSelector: {}
  tolerations: []
  affinity: {}

  # Pod configurations
  securityContext: {}
  terminationGracePeriodSeconds: 600
  priorityClassName: ""

  # OTel collector annotations
  annotations: {}
  podAnnotations: {}

  # OTel collector extra pod labels
  podLabels: {}

  # Extra environment variables to be set in the standalone OTel collector container
  extraEnvs: []

  # Extra volumes to be mounted to the OTel Collector container.
  extraVolumes: []
  extraVolumeMounts: []

  # Enable or disable features of the gateway.
  featureGates: ""

  # The OpenTelemetry Collector configuration for the standalone otel-collector deployment can be overridden in this field.
  # Default configuration defined in config/otel-collector-config.yaml
  # Any additional fields will be merged into the defaults,
  # existing fields can be disabled by setting them to `null`.
  config: {}

################################################################################
# OpenTelemetry service config, used for otel collector deployment.
# Disabled by default
################################################################################

# The opentelemetry collector service is created only if gateway.enabled = true.
service:
  # Service type
  type: ClusterIP
  # Service annotations
  annotations: {}

# Default values for splunk-otel-network-explorer.
networkExplorer:
  enabled: false
  images:
    tag: latest-v0.9
    repository: quay.io/signalfx
    pullPolicy: Always

  imagePullSecrets: []

  log:
    console: false
    # possible values: { error | warning | info | debug | trace }
    level: warning

  debug:
    enabled: false
    storeMinidump: false
    sendUnplannedExitMetric: true

  kernelCollector:
    enabled: true
    image:
      name: splunk-network-explorer-kernel-collector
      # tag: latest-v0.9
      # repository: quay.io/signalfx
    nodeSelector: {}
    disableHttpMetrics: false

    serviceAccount:
      create: false
      name: ""

    tolerations:
    - operator: "Exists"
      effect: "NoExecute"
    - operator: "Exists"
      effect: "NoSchedule"

    affinity: {}
    resources: {}

    # uncomment the line below to disable automatic kernel headers fetching
    # fetchKernelHeaders: false

    # uncomment to enable enrichment using Docker metadata
    # useDockerMetadata: true

    # uncomment to enable enrichment using Nomad metadata (https://www.nomadproject.io/)
    # collectNomadMetadata: true

  cloudCollector:
    enabled: false
    image:
      name: splunk-network-explorer-cloud-collector
      # tag: latest-v0.9
      # repository: quay.io/signalfx
    serviceAccount:
      create: false
      name: ""
      iamRole: ""

  k8sCollector:
    enabled: true
    relay:
      image:
        name: splunk-network-explorer-k8s-relay
        # tag: latest-v0.9
        # repository: quay.io/signalfx
    watcher:
      image:
        name: splunk-network-explorer-k8s-watcher
        # tag: latest-v0.9
        # repository: quay.io/signalfx
    serviceAccount:
      create: false
      name: ""

  reducer:
    ingestShards: 1
    matchingShards: 1
    aggregationShards: 1
    disableInternalMetrics: true
    disableMetrics: []
      ### to disable an entire metric category: ###
      # - tcp.all
      # - udp.all
      # - dns.all
      # - http.all
      ### to disable an individual metric: ###
      ### tcp ###
      # - tcp.bytes
      # - tcp.rtt.num_measurements
      # - tcp.active
      # - tcp.rtt.average
      # - tcp.packets
      # - tcp.retrans
      # - tcp.syn_timeouts
      # - tcp.new_sockets
      # - tcp.resets
      ### udp ###
      # - udp.bytes
      # - udp.packets
      # - udp.active
      # - udp.drops
      ### dns ###
      # - dns.client.duration.average
      # - dns.server.duration.average
      # - dns.active_sockets
      # - dns.responses
      # - dns.timeouts
      ### http ###
      # - http.client.duration.average
      # - http.server.duration.average
      # - http.active_sockets
      # - http.status_code
      ### ebpf_net ###
      # - ebpf_net.span_utilization_fraction
      # - ebpf_net.pipeline_metric_bytes_discarded
      # - ebpf_net.codetiming_min_ns
      # - ebpf_net.entrypoint_info
      # - ebpf_net.otlp_grpc.requests_sent
      # - ebpf_net.connections
      # - ebpf_net.rpc_queue_elem_utilization_fraction
      # - ebpf_net.disconnects
      # - ebpf_net.codetiming_avg_ns
      # - ebpf_net.client_handle_pool
      # - ebpf_net.otlp_grpc.successful_requests
      # - ebpf_net.span_utilization
      # - ebpf_net.up
      # - ebpf_net.rpc_queue_buf_utilization_fraction
      # - ebpf_net.collector_log_count
      # - ebpf_net.time_since_last_message_ns
      # - ebpf_net.bpf_log
      # - ebpf_net.codetiming_count
      # - ebpf_net.message
      # - ebpf_net.otlp_grpc.bytes_sent
      # - ebpf_net.pipeline_message_error
      # - ebpf_net.pipeline_metric_bytes_written
      # - ebpf_net.codetiming_max_ns
      # - ebpf_net.span_utilization_max
      # - ebpf_net.client_handle_pool_fraction
      # - ebpf_net.span_utilization_fraction
      # - ebpf_net.rpc_latency_ns
      # - ebpf_net.agg_root_truncation
      # - ebpf_net.clock_offset_ns
      # - ebpf_net.otlp_grpc.metrics_sent
      # - ebpf_net.otlp_grpc.unknown_response_tags
      # - ebpf_net.collector_health
      # - ebpf_net.codetiming_sum_ns
      # - ebpf_net.otlp_grpc.failed_requests
      # - ebpf_net.rpc_queue_buf_utilization
      ### to enable all metrics (including metrics turned off by default): ###
      # - none
    enableMetrics: []
      ### The disableMetrics flag is evaluated first, and only then is the enableMetrics flag evaluated. ###
      ### to enable an entire metric category: ###
      # - tcp.all
      # - udp.all
      # - dns.all
      # - http.all
      # - ebpf_net.all
      ### to enable an individual metric: ###
      ### tcp ###
      # - tcp.bytes
      # - tcp.rtt.num_measurements
      # - tcp.active
      # - tcp.rtt.average
      # - tcp.packets
      # - tcp.retrans
      # - tcp.syn_timeouts
      # - tcp.new_sockets
      # - tcp.resets
      ### udp ###
      # - udp.bytes
      # - udp.packets
      # - udp.active
      # - udp.drops
      ### dns ###
      # - dns.client.duration.average
      # - dns.server.duration.average
      # - dns.active_sockets
      # - dns.responses
      # - dns.timeouts
      ### http ###
      # - http.client.duration.average
      # - http.server.duration.average
      # - http.active_sockets
      # - http.status_code
      ### ebpf_net ###
      # - ebpf_net.span_utilization_fraction
      # - ebpf_net.pipeline_metric_bytes_discarded
      # - ebpf_net.codetiming_min_ns
      # - ebpf_net.entrypoint_info
      # - ebpf_net.otlp_grpc.requests_sent
      # - ebpf_net.connections
      # - ebpf_net.rpc_queue_elem_utilization_fraction
      # - ebpf_net.disconnects
      # - ebpf_net.codetiming_avg_ns
      # - ebpf_net.client_handle_pool
      # - ebpf_net.otlp_grpc.successful_requests
      # - ebpf_net.span_utilization
      # - ebpf_net.up
      # - ebpf_net.rpc_queue_buf_utilization_fraction
      # - ebpf_net.collector_log_count
      # - ebpf_net.time_since_last_message_ns
      # - ebpf_net.bpf_log
      # - ebpf_net.codetiming_count
      # - ebpf_net.message
      # - ebpf_net.otlp_grpc.bytes_sent
      # - ebpf_net.pipeline_message_error
      # - ebpf_net.pipeline_metric_bytes_written
      # - ebpf_net.codetiming_max_ns
      # - ebpf_net.span_utilization_max
      # - ebpf_net.client_handle_pool_fraction
      # - ebpf_net.rpc_latency_ns
      # - ebpf_net.agg_root_truncation
      # - ebpf_net.clock_offset_ns
      # - ebpf_net.otlp_grpc.metrics_sent
      # - ebpf_net.otlp_grpc.unknown_response_tags
      # - ebpf_net.collector_health
      # - ebpf_net.codetiming_sum_ns
      # - ebpf_net.otlp_grpc.failed_requests
      # - ebpf_net.rpc_queue_buf_utilization
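      ### Example (illustrative): because disableMetrics is evaluated first, disabling the
      ### entire HTTP category and then re-enabling one metric keeps only that metric:
      ### disableMetrics: ["http.all"]
      ### enableMetrics: ["http.status_code"]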
    telemetryPort: 7000
    statsPromPort: 7001
    image:
      name: splunk-network-explorer-reducer
      # tag: latest-v0.9
      # repository: quay.io/signalfx
    resources: {}
    nodeSelector: {}
    affinity: {}
    tolerations: []
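    ### Example (illustrative): on larger clusters the reducer is scaled by raising the
    ### shard counts above and bounded with a resources block, e.g.:
    ### ingestShards: 4
    ### resources:
    ###   limits:
    ###     cpu: "1"
    ###     memory: 2Gi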

  rbac:
    create: true

  podSecurityPolicy:
    enabled: true
    annotations: {}
      ## Specify pod annotations
      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
      ##
      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'


################################################################################
# Notice: Operator-related features are experimental (alpha maturity). There may
# be breaking changes, or Operator features may be replaced entirely with a
# better alternative in the future.
#
# The OpenTelemetry Operator running as a deployment with a replica count of 1.
# It auto-instruments applications to emit telemetry data.
# Related documentation: https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/auto-instrumentation-install.md
# Full list of Helm value configurations: https://artifacthub.io/packages/helm/opentelemetry-helm/opentelemetry-operator?modal=values
################################################################################

operator:
  enabled: false
  # For more details, refer to: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentation
  instrumentation:
    # Overrides for default instrumentation configurations can be specified here.
    # Source: https://github.com/signalfx/splunk-otel-collector-chart/blob/main/helm-charts/splunk-otel-collector/templates/operator/instrumentation.yaml
    # Rendered Default: https://github.com/signalfx/splunk-otel-collector-chart/blob/main/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml
    spec:
      # Optional "endpoint" parameter for exporting data to a specific target.
      # By default, the endpoint is set to the agent if the agent is enabled; otherwise it
      # falls back to the gateway, provided the gateway is enabled. If neither the agent nor
      # the gateway is enabled, the endpoint must be overridden here.
      # exporter:
        # endpoint: http://$(SPLUNK_OTEL_AGENT):4317
      # Optional "sampler" parameter for enabling trace sampling, see: https://opentelemetry.io/docs/concepts/sdk-configuration/general-sdk-configuration/#otel_traces_sampler
      # sampler:
        # type: traceidratio
        # argument: "0.95"
      # Optional "environment variable" parameters that can configure all instrumentation libraries.
      # If splunkObservability.profilingEnabled=true, environment variables enabling profiling will be added automatically.
      # env:
      # Auto-instrumentation Libraries (Start)
      # Below are configurations for the instrumentation libraries utilized in Auto-instrumentation.
      # Highlights:
      #   - Maturity varies among libraries (e.g., Java is more mature than Go). Check each library's stability here: https://opentelemetry.io/docs/instrumentation/#status-and-releases
      #   - Some libraries may be enabled by default. The current status can be checked here: https://github.com/open-telemetry/opentelemetry-operator#controlling-instrumentation-capabilities
      #   - Splunk provides best-effort support for native OpenTelemetry libraries, while offering full support for its own distributions.
      # Each library supports the following fields:
      #   - repository: Specifies the Docker image repository.
      #   - tag: Indicates the Docker image tag.
      #   - env: (Optional) Allows you to add any additional environment variables.
      java:
        repository: ghcr.io/signalfx/splunk-otel-java/splunk-otel-java
        tag: v1.28.0
      nodejs:
        repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs
        tag: 0.41.1
      go:
        repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-go
        tag: v0.2.2-alpha
      apache-httpd:
        repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd
        tag: 1.0.3
      python:
        repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python
        tag: 0.40b0
      dotnet:
        repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet
        tag: 1.0.0-rc.2
      # Auto-instrumentation Libraries (End)
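      # Example (not chart values): with the operator enabled, a workload opts in to
      # auto-instrumentation through a pod annotation, e.g. for the Java library above:
      #   annotations:
      #     instrumentation.opentelemetry.io/inject-java: "true"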
  admissionWebhooks:
    certManager:
      # Annotate the certificate and issuer to ensure they are created after the cert-manager CRDs have been installed.
      certificateAnnotations:
        "helm.sh/hook": post-install,post-upgrade
        "helm.sh/hook-weight": "1"
      issuerAnnotations:
        "helm.sh/hook": post-install,post-upgrade
        "helm.sh/hook-weight": "1"

# The cert-manager is a CNCF application deployed as a subchart and used for supporting operators that require TLS certificates.
# Full list of Helm value configurations: https://artifacthub.io/packages/helm/cert-manager/cert-manager?modal=values
certmanager:
  enabled: false
  installCRDs: true
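# Example (illustrative): the operator needs cert-manager for its admission webhooks,
# so unless cert-manager is already installed in the cluster, enable both together:
# operator:
#   enabled: true
# certmanager:
#   enabled: true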


################################################################################
# Helm Chart Feature Gates.
# The following feature gates are used to enable/disable features in the Helm chart
# that are not yet ready for general availability.
# Options in this section are not guaranteed to be stable and may change at any time.
################################################################################

featureGates:
  # Use Light Prometheus Receiver for metrics collection from discovered Prometheus endpoints.
  # https://github.com/signalfx/splunk-otel-collector/tree/main/internal/receiver/lightprometheusreceiver
  # Light Prometheus Receiver is optimized for performance and reduced memory footprint.
  # On the other hand, it does not support all Prometheus configuration options.
  useLightPrometheusReceiver: false
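
The values file above is applied with Helm. A minimal sketch, assuming the file is saved locally as values.yaml (the release name splunk-otel-collector is just an example):

helm repo add splunk-otel-collector-chart https://signalfx.github.io/splunk-otel-collector-chart
helm repo update
helm upgrade --install splunk-otel-collector splunk-otel-collector-chart/splunk-otel-collector -f values.yaml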