# Default values for retina.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# Support linux and windows by default.
os:
  linux: true
  windows: true

operator:
  name: retina-operator
  enabled: false
  repository: ghcr.io/microsoft/retina/retina-operator
  tag: "v0.0.2"
  installCRDs: true
  enableRetinaEndpoint: false
  resources:
    limits:
      cpu: 500m
      memory: 128Mi
    requests:
      cpu: 10m
      memory: 128Mi
  container:
    command:
      - "/retina-operator"
    args:
      - "--config"
      - "/retina/operator-config.yaml"
  telemetryInterval: "5m"
  affinity:
    # TODO(user): Override the following values to configure the nodeAffinity expression
    # according to the platforms which are supported by your solution.
    # It is considered best practice to support multiple architectures. You can
    # build your manager image using the makefile target docker-buildx.
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                  - linux
                  # - windows

image:
  repository: ghcr.io/microsoft/retina/retina-agent
  initRepository: ghcr.io/microsoft/retina/retina-init
  pullPolicy: Always
  # Overrides the image tag whose default is the chart appVersion.
  tag: "v0.0.2"

enableConntrackMetrics: false
enablePodLevel: false
remoteContext: false
enableAnnotations: false
bypassLookupIPOfInterest: false
dataAggregationLevel: "low"
dataSamplingRate: 1
# Use BPF ring buffers (BPF_MAP_TYPE_RINGBUF) instead of BPF_PERF_EVENT_ARRAY.
# Pros: lower per-event overhead at high event rates, simpler variable-sized records, more consistent latency.
# Cons: fixed-size pre-allocated (locked) memory that can be wasted on low traffic; events are dropped when the buffer fills (reserve fails).
# Possible values: "enabled", "disabled".
packetParserRingBuffer: "disabled"
# Ring buffer size in bytes (only used when packetParserRingBuffer="enabled").
# Default: 8MiB (8388608).
# Valid range: power of two between the kernel page size and 1GiB (inclusive).
# Invalid values cause startup to fail with a validation error.
# This value is not applied when ring buffers are disabled.
packetParserRingBufferSize: 8388608
# Maximum number of entries in the eBPF filter map (retina_filter).
# This map tracks IP addresses of pods of interest for network observability.
# Default: 255. Increase for large clusters with many tracked pods.
filterMapMaxEntries: 255

imagePullSecrets: []
nameOverride: "retina"
fullnameOverride: "retina-svc"

namespace: kube-system

agent:
  name: retina-agent

agent_win:
  name: retina-agent-win

retinaPort: 10093

apiServer:
  host: "0.0.0.0"
  port: 10093

# Supported - debug, info, error, warn, panic, fatal.
logLevel: debug

enabledPlugin_linux: '["dropreason","packetforward","linuxutil","dns"]'
enabledPlugin_win: '["hnsstats"]'

enableTelemetry: false

# Interval (duration string, e.g. "10s") at which metrics are scraped/published.
# Used by pluginmanager and all plugins; the default is applied when the value is unset or invalid.
metricsInterval: "10s"
metricsIntervalDuration: "10s"

azure:
  appinsights:
    instrumentation_key: "app-insights-instrumentation-key"

daemonset:
  container:
    retina:
      command:
        - "/retina/controller"
      args:
        - "--config"
        - "/retina/config/config.yaml"
      env: []
      healthProbeBindAddress: ":18081"
      metricsBindAddress: ":18080"
      ports:
        containerPort: 10093
  telemetryInterval: "15m"

# volume mounts with name and mountPath
volumeMounts:
  debug: /sys/kernel/debug
  trace: /sys/kernel/tracing
  bpf: /sys/fs/bpf
  cgroup: /sys/fs/cgroup
  tmp: /tmp
  config: /retina/config
  host-os-release: /etc/os-release

# Volume mounts for Windows.
volumeMounts_win:
  retina-config-win: retina

securityContext:
  privileged: false
  capabilities:
    add:
      - SYS_ADMIN
      - NET_ADMIN # for packetparser plugin
      - IPC_LOCK # for mmap() calls made by NewReader(), ref: https://man7.org/linux/man-pages/man2/mmap.2.html
      - SYS_RESOURCE # for setting rlimit
  windowsOptions:
    runAsUserName: "NT AUTHORITY\\SYSTEM"

service:
  type: ClusterIP
  port: 10093
  targetPort: 10093
  name: retina

operatorService:
  type: ClusterIP
  port: 8080
  targetPort: 8080
  name: retina-operator

serviceAccount:
  annotations: {}
  name: "retina-agent"

resources:
  limits:
    memory: "300Mi"
    cpu: "500m"
  requests:
    memory: "300Mi"
    cpu: "500m"

# Retina Capture Configuration
capture:
  # debug toggles the debug mode for the Capture. Must be false in production.
  debug: true
  # jobNumLimit indicates the maximum number of jobs that can be created for each Capture.
  jobNumLimit: 0
  # enableManagedStorageAccount toggles the use of managed storage account for storing artifacts.
  # If set to true, the following fields related to Azure credentials must be set.
  # Ref: docs/captures/managed-storage-account.md
  enableManagedStorageAccount: false
  tenantId: ""
  subscriptionId: ""
  aadClientId: ""
  aadClientSecret: ""
  resourceGroup: ""
  location: ""
  managedIdentityClientId: ""

## @param nodeSelector [object] Node labels for pod assignment
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param tolerations [array] Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param affinity [object] Affinity rules for pod assignment
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}

## @param readinessProbe.initialDelaySeconds [number] Initial delay seconds for readinessProbe
## @param readinessProbe.periodSeconds [number] Period seconds for readinessProbe
## @param readinessProbe.timeoutSeconds [number] Timeout seconds for readinessProbe
## @param readinessProbe.failureThreshold [number] Failure threshold for readinessProbe
## @param readinessProbe.successThreshold [number] Success threshold for readinessProbe
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
readinessProbe: {}
  # initialDelaySeconds: 30
  # periodSeconds: 10
  # timeoutSeconds: 15
  # failureThreshold: 5
  # successThreshold: 1
## @param livenessProbe.initialDelaySeconds [number] Initial delay seconds for livenessProbe
## @param livenessProbe.periodSeconds [number] Period seconds for livenessProbe
## @param livenessProbe.timeoutSeconds [number] Timeout seconds for livenessProbe
## @param livenessProbe.failureThreshold [number] Failure threshold for livenessProbe
## @param livenessProbe.successThreshold [number] Success threshold for livenessProbe
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe: {}
  # initialDelaySeconds: 30
  # periodSeconds: 10
  # timeoutSeconds: 15
  # failureThreshold: 5
  # successThreshold: 1

## @param priorityClassName [string] Indicates the pod's priority
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/
##
priorityClassName: ""

metrics:
  ## Prometheus Service Monitor
  ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
  ##
  podMonitor:
    ## @param metrics.podMonitor.enabled Create PodMonitor Resource for scraping metrics using PrometheusOperator
    ##
    enabled: false
    ## @param metrics.podMonitor.namespace Namespace in which the PodMonitor should be created
    ##
    namespace: null
    ## @param metrics.podMonitor.interval Specify the interval at which metrics should be scraped
    ##
    interval: 30s
    ## @param metrics.podMonitor.scrapeTimeout Specify the timeout after which the scrape is ended
    ##
    scrapeTimeout: 30s
    ## @param metrics.podMonitor.additionalLabels [object] Additional labels that can be used so PodMonitors will be discovered by Prometheus
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
    ##
    additionalLabels: {}
    ## @param metrics.podMonitor.scheme Scheme to use for scraping
    ##
    scheme: http
    ## @param metrics.podMonitor.tlsConfig [object] TLS configuration used for scrape endpoints used by Prometheus
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
    ## e.g:
    ## tlsConfig:
    ##   ca:
    ##     secret:
    ##       name: existingSecretName
    ##
    tlsConfig: {}
    ## @param metrics.podMonitor.relabelings [array] Prometheus relabeling rules
    ##
    relabelings: []
  serviceMonitor:
    ## @param metrics.serviceMonitor.enabled Create serviceMonitor Resource for scraping metrics using PrometheusOperator
    ##
    enabled: false
    ## @param metrics.serviceMonitor.namespace Namespace in which the serviceMonitor should be created
    ##
    namespace: null
    ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped
    ##
    interval: 30s
    ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended
    ##
    scrapeTimeout: 30s
    ## @param metrics.serviceMonitor.additionalLabels [object] Additional labels that can be used so serviceMonitor will be discovered by Prometheus
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
    ## 'release: prometheus' label is needed for prometheus to discover ServiceMonitors
    ##
    additionalLabels:
      release: prometheus
      app.kubernetes.io/component: metrics
    ## @param metrics.serviceMonitor.scheme Scheme to use for scraping
    ##
    scheme: http
    ## @param metrics.serviceMonitor.tlsConfig [object] TLS configuration used for scrape endpoints used by Prometheus
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
    ## e.g:
    ## tlsConfig:
    ##   ca:
    ##     secret:
    ##       name: existingSecretName
    ##
    tlsConfig: {}
    ## @param metrics.serviceMonitor.relabelings [array] Prometheus relabeling rules to apply to samples before scraping
    ##
    relabelings:
      - sourceLabels:
          [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
        separator: ";"
        regex: ([^:]+)(?::\d+)?;(\d+)
        targetLabel: __address__
        replacement: ${1}:${2}
        action: replace
      - sourceLabels: [__meta_kubernetes_pod_node_name]
        action: replace
        targetLabel: instance
    ## @param metrics.serviceMonitor.metricRelabelings [array] Prometheus relabeling rules to apply to samples before ingestion
    ##
    metricRelabelings:
      - sourceLabels: [__name__]
        action: keep
        regex: (.*)
