{"id":820,"date":"2020-09-27T14:45:51","date_gmt":"2020-09-27T06:45:51","guid":{"rendered":"https:\/\/www.linuxdevops.cn\/?p=820"},"modified":"2021-09-03T16:14:39","modified_gmt":"2021-09-03T08:14:39","slug":"kubernetes-helm-deployment-and-installation-of-efk","status":"publish","type":"post","link":"https:\/\/www.linuxdevops.cn\/2020\/09\/kubernetes-helm-deployment-and-installation-of-efk\/","title":{"rendered":"Kubernetes helm \u90e8\u7f72\u5b89\u88c5EFK \u5206\u5e03\u5f0f\u65e5\u5fd7\u6536\u96c6\u7cfb\u7edf"},"content":{"rendered":"\n

Download the official Helm charts:

git clone https://github.com/elastic/helm-charts.git

# Create the namespace
kubectl create namespace efk
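If you would rather pin a chart version than track the git repository, the same charts are also published in Elastic's Helm repository. A minimal sketch, assuming Helm 3 is installed:

# Add Elastic's chart repository and list the available chart versions
helm repo add elastic https://helm.elastic.co
helm repo update
helm search repo elastic/elasticsearch --versions | head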

1. Deploy and configure Elasticsearch

cd helm-charts
vim elasticsearch/values.yaml

Note: modify the settings called out by the inline comments below.
---
clusterName: "elasticsearch"
nodeGroup: "master"

# The service that non master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: ""

# Elasticsearch roles that will be applied to this nodeGroup
# These will be set as environment variables. E.g. node.master=true
roles:
  master: "true"
  ingest: "true"
  data: "true"

replicas: 3
minimumMasterNodes: 2

esMajorVersion: ""

# Allows you to add any config files in /usr/share/elasticsearch/config/
# such as elasticsearch.yml and log4j2.properties
esConfig: {}
#  elasticsearch.yml: |
#    key:
#      nestedkey: value
#  log4j2.properties: |
#    key = value

# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs: []
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here

# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
# - secretRef:
#     name: env-secret
# - configMapRef:
#     name: config-map

# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []
#  - name: elastic-certificates
#    secretName: elastic-certificates
#    path: /usr/share/elasticsearch/config/certs
#    defaultMode: 0755
# The upstream image is slow to pull from outside, so use an Aliyun mirror instead (public image; the address below can be used).
image: "registry.cn-hangzhou.aliyuncs.com/wang_feng/elasticsearch-oss"
imageTag: "7.9.0"
imagePullPolicy: "IfNotPresent"

podAnnotations: {}
  # iam.amazonaws.com/role: es-cluster

# additional labels
labels: {}

esJavaOpts: "-Xmx1g -Xms1g"

resources:
  requests:
    cpu: "1000m"
    memory: "2Gi"
  limits:
    cpu: "1000m"
    memory: "2Gi"

initResources: {}
  # limits:
  #   cpu: "25m"
  #   # memory: "128Mi"
  # requests:
  #   cpu: "25m"
  #   memory: "128Mi"

sidecarResources: {}
  # limits:
  #   cpu: "25m"
  #   # memory: "128Mi"
  # requests:
  #   cpu: "25m"
  #   memory: "128Mi"

networkHost: "0.0.0.0"

volumeClaimTemplate:
  accessModes: [ "ReadWriteOnce" ]
  storageClassName: "managed-nfs-storage"  # use a StorageClass; see the end of this article for how to create one
  resources:
    requests:
      storage: 5Gi  # requested storage size; kept small for the test environment in this article

rbac:
  create: false
  serviceAccountAnnotations: {}
  serviceAccountName: ""

podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: true
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim

persistence:
  enabled: true
  labels:
    # Add default labels for the volumeClaimTemplate of the StatefulSet
    enabled: false
  annotations: {}

extraVolumes: []
  # - name: extras
  #   emptyDir: {}

extraVolumeMounts: []
  # - name: extras
  #   mountPath: /usr/share/extras
  #   readOnly: true

extraContainers: []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']

extraInitContainers: []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']

# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"

# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "hard"

# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}

# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"

# The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when
# there are many services in the current namespace.
# If you experience slow pod startups you probably want to set this to false.
enableServiceLinks: true

protocol: http
httpPort: 9200
transportPort: 9300

service:
  labels: {}
  labelsHeadless: {}
  type: ClusterIP
  nodePort: ""
  annotations: {}
  httpPortName: http
  transportPortName: transport
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  externalTrafficPolicy: ""

updateStrategy: RollingUpdate

# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1

podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000

securityContext:
  capabilities:
    drop:
    - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

# How long to wait for elasticsearch to stop gracefully
terminationGracePeriod: 120

sysctlVmMaxMapCount: 262144

readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 30
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5

# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
# Adjusted cluster health check for this deployment
#clusterHealthCheckParams: "wait_for_status=green&timeout=50s"
clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s"

## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""

imagePullSecrets: []
nodeSelector: {}
tolerations: []

# Enabling this will publicly expose your Elasticsearch instance.
# Only enable this if you have security enabled on your cluster
ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

nameOverride: ""
fullnameOverride: ""

# https://github.com/elastic/helm-charts/issues/63
masterTerminationFix: false

lifecycle: {}
  # preStop:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
  # postStart:
  #   exec:
  #     command:
  #       - bash
  #       - -c
  #       - |
  #         #!/bin/bash
  #         # Add a template to adjust number of shards/replicas
  #         TEMPLATE_NAME=my_template
  #         INDEX_PATTERN="logstash-*"
  #         SHARD_COUNT=8
  #         REPLICA_COUNT=1
  #         ES_URL=http://localhost:9200
  #         while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
  #         curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'

sysctlInitContainer:
  enabled: true

keystore: []

# Deprecated
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""
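Before installing, it can help to sanity-check the edited values. A sketch using standard Helm 3 commands, with the same release name and namespace as used below:

# Static checks on the chart and the edited values
helm lint elasticsearch/

# Render the manifests locally to confirm the image, storageClassName and replica count took effect
helm template elasticsearch elasticsearch/ -n efk | grep -E 'image:|storageClassName|replicas:'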

Install Elasticsearch with Helm:

helm install -n efk elasticsearch elasticsearch/
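The StatefulSet pods take a few minutes to become ready. One way to confirm the cluster is up (a sketch; the service name elasticsearch-master comes from clusterName plus nodeGroup in values.yaml):

# Watch the three elasticsearch-master pods start
kubectl get pods -n efk -l app=elasticsearch-master -w

# Port-forward the service and query cluster health
kubectl port-forward -n efk svc/elasticsearch-master 9200:9200 &
curl -s 'http://127.0.0.1:9200/_cluster/health?pretty'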

2. Deploy and configure Filebeat

vim filebeat/values.yaml

Note: modify the settings called out by the inline comments below.
---
# Allows you to add any config files in /usr/share/filebeat
# such as filebeat.yml
# Filebeat configuration: where logs are read from and which Elasticsearch index they are written to
filebeatConfig:
  filebeat.yml: |
    filebeat.inputs:
    - type: container   # container input type
      paths:
        - /var/log/containers/*.log   # log location
      processors:
      - add_kubernetes_metadata:
          host: ${NODE_NAME}
          matchers:
          - logs_path:
              logs_path: "/var/log/containers/"
    - type: container
      paths:
        - /var/lib/docker/containers/*/*.log
      fields:
        type: "kubernetes.container.name"   # custom index type
      processors:
      - add_kubernetes_metadata:
          host: ${NODE_NAME}
          matchers:
          - logs_path:
              logs_path: "/var/lib/docker/containers/"
    output.elasticsearch:  # ship logs to elasticsearch
      host: '${NODE_NAME}'
      hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
      indices:
        - index: "container-name-%{+yyyy.MM.dd}"  # custom index name (one index per day)
          when.equals:
            fields.type: "kubernetes.container.name"

# Extra environment variables to append to the DaemonSet pod spec.
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs:   # set the locale
 - name: LANG
   value: en_US.UTF-8

extraVolumeMounts:   # directories mounted into the pod
  - name: shoplog
    mountPath: /shop_data/logs
    readOnly: true
  - name: sysdate
    mountPath: /etc/localtime

extraVolumes:    # host paths to mount
  - name: shoplog
    hostPath:
      path: /data/k8s/nas-e898efc3-324d-41d2-938a-bf02eb80e098/shop_data/logs
  - name: sysdate
    hostPath:
      path: /etc/localtime

extraContainers: ""
# - name: dummy-init
#   image: busybox
#   command: ['echo', 'hey']

extraInitContainers: []
# - name: dummy-init
#   image: busybox
#   command: ['echo', 'hey']

envFrom: []
# - configMapRef:
#     name: configmap-name

# Root directory where Filebeat will write data to in order to persist registry data across pod restarts (file position and other metadata).
hostPathRoot: /var/lib
hostNetworking: false
# The upstream image is slow to pull from outside, so use an Aliyun mirror instead (public image; the address below can be used).
image: "registry.cn-hangzhou.aliyuncs.com/wang_feng/filebeat-oss"
imageTag: "7.9.0"
imagePullPolicy: "IfNotPresent"
imagePullSecrets: []

livenessProbe:
  exec:
    command:
      - sh
      - -c
      - |
        #!/usr/bin/env bash -e
        curl --fail 127.0.0.1:5066
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 5

readinessProbe:
  exec:
    command:
      - sh
      - -c
      - |
        #!/usr/bin/env bash -e
        filebeat test output
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 5

# Whether this chart should self-manage its service account, role, and associated role binding.
managedServiceAccount: true

# additional labels
labels: {}

podAnnotations: {}
  # iam.amazonaws.com/role: es-cluster

# Various pod security context settings. Bear in mind that many of these have an impact on Filebeat functioning properly.
#
# - User that the container will execute as. Typically necessary to run as root (0) in order to properly collect host container logs.
# - Whether to execute the Filebeat containers as privileged containers. Typically not necessary unless running within environments such as OpenShift.
podSecurityContext:
  runAsUser: 0
  privileged: false

resources:
  requests:
    cpu: "100m"
    memory: "100Mi"
  limits:
    cpu: "1000m"
    memory: "200Mi"

# Custom service account override that the pod will use
serviceAccount: ""

# Annotations to add to the ServiceAccount that is created if the serviceAccount value isn't set.
serviceAccountAnnotations: {}
  # eks.amazonaws.com/role-arn: arn:aws:iam::111111111111:role/k8s.clustername.namespace.serviceaccount

# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security other sensitive values
secretMounts: []
#  - name: filebeat-certificates
#    secretName: filebeat-certificates
#    path: /usr/share/filebeat/certs

# How long to wait for Filebeat pods to stop gracefully
terminationGracePeriod: 30

tolerations: []

nodeSelector: {}

affinity: {}

# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

updateStrategy: RollingUpdate

# Override various naming aspects of this chart
# Only edit these if you know what you're doing
nameOverride: ""
fullnameOverride: ""

Install Filebeat with Helm:

helm install -n efk filebeat filebeat/
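Filebeat is deployed as a DaemonSet, so one pod should appear per schedulable node. A quick check that it is running and shipping logs (a sketch; the generated DaemonSet and pod names depend on the release name, so list them first):

# One filebeat pod per node; note the generated pod names
kubectl get daemonset,pods -n efk -o wide | grep -i filebeat

# Tail one of the filebeat pods (substitute a real pod name from the output above)
# to confirm it connected to elasticsearch-master:9200 without errors
kubectl logs -n efk <filebeat-pod-name> --tail=20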

3. Deploy and configure Kibana

vim kibana/values.yaml

Note: modify the settings called out by the inline comments below.
---
# elasticsearch host address
elasticsearchHosts: "http://elasticsearch-master:9200"

replicas: 1

# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs:
  - name: "NODE_OPTIONS"
    value: "--max-old-space-size=1800"
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here

# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
# - secretRef:
#     name: env-secret
# - configMapRef:
#     name: config-map

# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []
#  - name: kibana-keystore
#    secretName: kibana-keystore
#    path: /usr/share/kibana/data/kibana.keystore
#    subPath: kibana.keystore # optional
# The upstream image is slow to pull from outside, so use an Aliyun mirror instead (public image; the address below can be used).
image: "registry.cn-hangzhou.aliyuncs.com/wang_feng/kibana-oss"
imageTag: "7.9.0"
imagePullPolicy: "IfNotPresent"

# additional labels
labels: {}

podAnnotations: {}
  # iam.amazonaws.com/role: es-cluster

resources:
  requests:
    cpu: "1000m"
    memory: "2Gi"
  limits:
    cpu: "1000m"
    memory: "2Gi"

protocol: http

serverHost: "0.0.0.0"

healthCheckPath: "/app/kibana"

# Allows you to add any config files in /usr/share/kibana/config/
# such as kibana.yml
kibanaConfig: {}
#   kibana.yml: |
#     key:
#       nestedkey: value

# If a Pod Security Policy is in use, it may be required to specify a security context as well as a service account

podSecurityContext:
  fsGroup: 1000

securityContext:
  capabilities:
    drop:
    - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

serviceAccount: ""

# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

httpPort: 5601

extraContainers: ""
# - name: dummy-init
#   image: busybox
#   command: ['echo', 'hey']

extraInitContainers: ""
# - name: dummy-init
#   image: busybox
#   command: ['echo', 'hey']

updateStrategy:
  type: "Recreate"

service:
  type: ClusterIP
  loadBalancerIP: ""
  port: 5601
  nodePort: ""
  labels: {}
  annotations: {}
    # cloud.google.com/load-balancer-type: "Internal"
    # service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
    # service.beta.kubernetes.io/azure-load-balancer-internal: "true"
    # service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
    # service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true"
  loadBalancerSourceRanges: []
    # 0.0.0.0/0
# Configure an Ingress for domain-name access
ingress:
  enabled: true
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/whitelist-source-range: 124.133.53.77/32,64.115.0.0/16,64.105.0.0/16    # restrict access to a whitelist of source IPs
    kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - kibana.wangfeng.com
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5

imagePullSecrets: []
nodeSelector: {}
tolerations: []
affinity: {}

nameOverride: ""
fullnameOverride: ""

lifecycle: {}
  # preStop:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
  # postStart:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]

# Deprecated - use only with versions < 6.6
elasticsearchURL: "" # "http://elasticsearch-master:9200"

Install Kibana with Helm:

helm install -n efk kibana kibana/
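Until DNS (or a local hosts entry) points kibana.wangfeng.com at the ingress controller, Kibana can also be reached through a port-forward. A sketch, assuming the service keeps the chart's generated name of kibana-kibana; confirm the real name first:

# Confirm the service name, then forward it locally
kubectl get svc -n efk
kubectl port-forward -n efk svc/kibana-kibana 5601:5601
# Then browse to http://127.0.0.1:5601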

4. Check the deployment

kubectl get pod -n efk
kubectl get ingress -n efk
kubectl get pvc -n efk
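Beyond the pod, ingress and PVC checks above, it is worth confirming that Filebeat has actually created the daily container-name-* index defined in its values.yaml. A sketch reusing the Elasticsearch port-forward from earlier:

# List indices; a container-name-YYYY.MM.DD index should be present
kubectl port-forward -n efk svc/elasticsearch-master 9200:9200 &
curl -s 'http://127.0.0.1:9200/_cat/indices?v'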
\"\"<\/figure>\n\n\n

5. Log in to Kibana, create an index pattern, and view the logs

http://kibana.wangfeng.com/

Create the index pattern.
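The index pattern can be created in the Kibana UI (typically under Stack Management > Index Patterns) or, if you prefer something scriptable, through Kibana's saved-objects API. A hedged sketch against a port-forwarded Kibana, matching the container-name-* index from the Filebeat config:

# Create an index pattern named container-name-* with @timestamp as the time field
curl -s -X POST 'http://127.0.0.1:5601/api/saved_objects/index-pattern/container-name' \
  -H 'kbn-xsrf: true' \
  -H 'Content-Type: application/json' \
  -d '{"attributes":{"title":"container-name-*","timeFieldName":"@timestamp"}}'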

\"\"<\/figure>\n\n\n

View the logs.

\"\"<\/figure>\n\n\n


Official site: https://www.elastic.co/cn/

GitHub repository: https://github.com/elastic/helm-charts

\n
Related post: Creating an NFS-based Kubernetes StorageClass with Helm