"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "devstack/files/influxdb/influxdb.conf" between
monasca-api-3.0.0.tar.gz and monasca-api-3.1.0.tar.gz

About: OpenStack Monasca API is a RESTful API server that is designed with a layered architecture and supports Monitoring as a Service (MONaaS).
The "Train" series (latest release).

influxdb.conf (monasca-api-3.0.0) : influxdb.conf (monasca-api-3.1.0)
### Welcome to the InfluxDB configuration file. ### Welcome to the InfluxDB configuration file.
# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com # The values in this file override the default values used by the system if
# The data includes raft id (random 8 bytes), os, arch, version, and metadata. # a config option is not specified. The commented out lines are the configuration
# We don't track ip addresses of servers reporting. This is only used # field and the default value used. Uncommenting a line and changing the value
# to track the number of instances running and the versions, which # will change the value used at runtime when the process is restarted.
# is very helpful for us.
# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
# The data includes a random ID, os, arch, version, the number of series and other
# usage data. No data from user databases is ever transmitted.
# Change this option to true to disable reporting. # Change this option to true to disable reporting.
reporting-disabled = false # reporting-disabled = false
### # Bind address to use for the RPC service for backup and restore.
### Enterprise registration control # bind-address = "127.0.0.1:8088"
###
[registration]
# enabled = true
# url = "https://enterprise.influxdata.com" # The Enterprise server URL
# token = "" # Registration token for Enterprise server
### ###
### [meta] ### [meta]
### ###
### Controls the parameters for the Raft consensus group that stores metadata ### Controls the parameters for the Raft consensus group that stores metadata
### about the InfluxDB cluster. ### about the InfluxDB cluster.
### ###
[meta] [meta]
# Where the metadata/raft database is stored
dir = "/var/lib/influxdb/meta" dir = "/var/lib/influxdb/meta"
hostname = "localhost"
bind-address = ":8089" # Automatically create a default retention policy when creating a database.
retention-autocreate = true # retention-autocreate = true
election-timeout = "1s"
heartbeat-timeout = "1s" # If log messages are printed for the meta service
leader-lease-timeout = "500ms" # logging-enabled = true
commit-timeout = "50ms"
cluster-tracing = false
# If enabled, when a Raft cluster loses a peer due to a `DROP SERVER` command,
# the leader will automatically ask a non-raft peer node to promote to a raft
# peer. This only happens if there is a non-raft peer node available to promote.
# This setting only affects the local node, so to ensure if operates correctly, be sure to set
# it in the config of every node.
raft-promotion-enabled = true
### ###
### [data] ### [data]
### ###
### Controls where the actual shard data for InfluxDB lives and how it is ### Controls where the actual shard data for InfluxDB lives and how it is
### flushed from the WAL. "dir" may need to be changed to a suitable place ### flushed from the WAL. "dir" may need to be changed to a suitable place
### for your system, but the WAL settings are an advanced configuration. The ### for your system, but the WAL settings are an advanced configuration. The
### defaults should work for most systems. ### defaults should work for most systems.
### ###
[data] [data]
# The directory where the TSM storage engine stores TSM files.
dir = "/var/lib/influxdb/data" dir = "/var/lib/influxdb/data"
# Controls the engine type for new shards. Options are b1, bz1, or tsm1. # The directory where the TSM storage engine stores WAL files.
# b1 is the 0.9.2 storage engine, bz1 is the 0.9.3 and 0.9.4 engine.
# tsm1 is the 0.9.5 engine and is currently EXPERIMENTAL. Until 0.9.5 is
# actually released data written into a tsm1 engine may be need to be wiped
# between upgrades.
# engine ="bz1"
# The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
# apply to any new shards created after upgrading to a version > 0.9.3.
max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB.
wal-flush-interval = "10m" # Maximum time data can sit in WAL before a flush.
wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed.
# These are the WAL settings for the storage engine >= 0.9.3
wal-dir = "/var/lib/influxdb/wal" wal-dir = "/var/lib/influxdb/wal"
wal-enable-logging = true
# When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to # The amount of time that a write will wait before fsyncing. A duration
# flush to the index # greater than 0 can be used to batch up multiple fsync calls. This is useful for slower
# wal-ready-series-size = 25600 # disks or when WAL write contention is seen. A value of 0s fsyncs every write to the WAL.
# Flush and compact a partition once this ratio of series are over the ready size # Values in the range of 0-100ms are recommended for non-SSD disks.
# wal-compaction-threshold = 0.6 # wal-fsync-delay = "0s"
# The type of shard index to use for new shards. The default is an in-memory index that is
# Force a flush and compaction if any series in a partition gets above this size in bytes # recreated at startup. A value of "tsi1" will use a disk based index that supports higher
# wal-max-series-size = 2097152 # cardinality datasets.
# index-version = "inmem"
# Force a flush of all series and full compaction if there have been no writes in this index-version = "tsi1"
# amount of time. This is useful for ensuring that shards that are cold for writes don't
# keep a bunch of data cached in memory and in the WAL. # Trace logging provides more verbose output around the tsm engine. Turning
# wal-flush-cold-interval = "10m" # this on can provide more useful output for debugging tsm engine issues.
# trace-logging-enabled = false
# Force a partition to flush its largest series if it reaches this approximate size in
# bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory.
# The more memory you have, the bigger this can be.
# wal-partition-size-threshold = 20971520
# Whether queries should be logged before execution. Very useful for troubleshooting, but will # Whether queries should be logged before execution. Very useful for troubleshooting, but will
# log any sensitive data contained within a query. # log any sensitive data contained within a query.
# query-log-enabled = true # query-log-enabled = true
### # Validates incoming writes to ensure keys only have valid unicode characters.
### [hinted-handoff] # This setting will incur a small overhead because every key must be checked.
### # validate-keys = false
### Controls the hinted handoff feature, which allows nodes to temporarily
### store queued data when one node of a cluster is down for a short period # Settings for the TSM engine
### of time.
### # CacheMaxMemorySize is the maximum size a shard's cache can
# reach before it starts rejecting writes.
[hinted-handoff] # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
enabled = true # Values without a size suffix are in bytes.
dir = "/var/lib/influxdb/hh" # cache-max-memory-size = "1g"
max-size = 1073741824
max-age = "168h" # CacheSnapshotMemorySize is the size at which the engine will
retry-rate-limit = 0 # snapshot the cache and write it to a TSM file, freeing up memory
# Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
# Hinted handoff will start retrying writes to down nodes at a rate of once per second. # Values without a size suffix are in bytes.
# cache-snapshot-memory-size = "25m"
# If any error occurs, it will backoff in an exponential manner, until the interval # CacheSnapshotWriteColdDuration is the length of time at
# reaches retry-max-interval. Once writes to all nodes are successfully completed the # which the engine will snapshot the cache and write it to
# interval will reset to retry-interval. # a new TSM file if the shard hasn't received writes or deletes
retry-interval = "1s" # cache-snapshot-write-cold-duration = "10m"
retry-max-interval = "1m" # CompactFullWriteColdDuration is the duration at which the engine
# will compact all TSM files in a shard if it hasn't received a
# Interval between running checks for data that should be purged. Data is purged from # write or delete
# hinted-handoff queues for two reasons. 1) The data is older than the max age, or # compact-full-write-cold-duration = "4h"
# 2) the target node has been dropped from the cluster. Data is never dropped until # The maximum number of concurrent full and level compactions that can run at one time. A
# it has reached max-age however, for a dropped node or not. # value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. Any number greater
purge-interval = "1h" # than 0 limits compactions to that value. This setting does not apply
# to cache snapshotting.
### # max-concurrent-compactions = 0
### [cluster]
### # CompactThroughput is the rate limit in bytes per second that we
### Controls non-Raft cluster behavior, which generally includes how data is # will allow TSM compactions to write to disk. Note that short bursts are allowed
### shared across shards. # to happen at a possibly larger value, set by CompactThroughputBurst
### # compact-throughput = "48m"
[cluster]
shard-writer-timeout = "10s" # The time within which a shard must respond to write. # CompactThroughputBurst is the rate limit in bytes per second that we
write-timeout = "5s" # The time within which a write operation must complete on the cluster. # will allow TSM compactions to write to disk.
# compact-throughput-burst = "48m"
# If true, then the mmap advise value MADV_WILLNEED will be provided to the kernel with respect to
# TSM files. This setting has been found to be problematic on some kernels, and defaults to off.
# It might help users who have slow disks in some cases.
# tsm-use-madv-willneed = false
# Settings for the inmem index
# The maximum series allowed per database before writes are dropped. This limit can prevent
# high cardinality issues at the database level. This limit can be disabled by setting it to
# 0.
# max-series-per-database = 1000000

# The maximum number of tag values per tag that are allowed before writes are dropped. This limit
# can prevent high cardinality tag values from being written to a measurement. This limit can be
# disabled by setting it to 0.
# max-values-per-tag = 100000
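Both cardinality limits above can also be disabled outright if a deployment accepts the risk of unbounded series growth; a minimal sketch of such an override (illustrative values, not present in either packaged file):

# illustrative override: turn off both cardinality guards
max-series-per-database = 0
max-values-per-tag = 0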
# Settings for the tsi1 index
# The threshold, in bytes, when an index write-ahead log file will compact
# into an index file. Lower sizes will cause log files to be compacted more
# quickly and result in lower heap usage at the expense of write throughput.
# Higher sizes will be compacted less frequently, store more series in-memory,
# and provide higher write throughput.
# Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
# Values without a size suffix are in bytes.
# max-index-log-file-size = "1m"
# The size of the internal cache used in the TSI index to store previously
# calculated series results. Cached results will be returned quickly from the cache rather
# than needing to be recalculated when a subsequent query with a matching tag key/value
# predicate is executed. Setting this value to 0 will disable the cache, which may
# lead to query performance issues.
# This value should only be increased if it is known that the set of regularly used
# tag key/value predicates across all measurements for a database is larger than 100. An
# increase in cache size may lead to an increase in heap usage.
series-id-set-cache-size = 100
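Taken together, the new [data] options shown above can be tuned per deployment. A hedged sketch with illustrative values, assuming slower non-SSD disks and a high-cardinality workload (none of these values appear in either packaged file):

[data]
  dir = "/var/lib/influxdb/data"
  wal-dir = "/var/lib/influxdb/wal"
  # batch fsyncs, following the 0-100ms guidance for non-SSD disks above
  wal-fsync-delay = "50ms"
  # disk-based index for high-cardinality series sets
  index-version = "tsi1"
  # snapshot the cache somewhat earlier than the 25m default
  cache-snapshot-memory-size = "20m"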
###
### [coordinator]
###
### Controls the clustering service configuration.
###
[coordinator]
# The default time a write request will wait until a "timeout" error is returned to the caller.
# write-timeout = "10s"

# The maximum number of concurrent queries allowed to be executing at one time. If a query is
# executed and exceeds this limit, an error is returned to the caller. This limit can be disabled
# by setting it to 0.
# max-concurrent-queries = 0

# The maximum time a query will is allowed to execute before being killed by the system. This limit
# can help prevent run away queries. Setting the value to 0 disables the limit.
# query-timeout = "0s"

# The time threshold when a query will be logged as a slow query. This limit can be set to help
# discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging.
# log-queries-after = "0s"

# The maximum number of points a SELECT can process. A value of 0 will make
# the maximum point count unlimited. This will only be checked every second so queries will not
# be aborted immediately when hitting the limit.
# max-select-point = 0

# The maximum number of series a SELECT can run. A value of 0 will make the maximum series
# count unlimited.
# max-select-series = 0

# The maximum number of group by time bucket a SELECT can create. A value of zero will max the maximum
# number of buckets unlimited.
# max-select-buckets = 0
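As an illustration of how these [coordinator] limits combine, a hedged example override follows; the values are arbitrary and chosen only to show the shape, not recommendations from either file:

[coordinator]
  # kill queries that run longer than five minutes
  query-timeout = "5m"
  # log anything slower than ten seconds as a slow query
  log-queries-after = "10s"
  # allow at most 20 queries to execute concurrently
  max-concurrent-queries = 20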
### ###
### [retention] ### [retention]
### ###
### Controls the enforcement of retention policies for evicting old data. ### Controls the enforcement of retention policies for evicting old data.
### ###
[retention] [retention]
enabled = true # Determines whether retention policy enforcement enabled.
check-interval = "30m" # enabled = true
# The interval of time when retention policy enforcement checks run.
# check-interval = "30m"
### ###
### [shard-precreation] ### [shard-precreation]
### ###
### Controls the precreation of shards, so they are created before data arrives. ### Controls the precreation of shards, so they are available before data arrives.
### Only shards that will exist in the future, at time of creation, are precreated. ### Only shards that, after creation, will have both a start- and end-time in the
### future, will ever be created. Shards are never precreated that would be wholly
### or partially in the past.
[shard-precreation] [shard-precreation]
enabled = true # Determines whether shard pre-creation service is enabled.
check-interval = "10m" # enabled = true
advance-period = "30m"
# The interval of time when the check to pre-create new shards runs.
# check-interval = "10m"
# The default period ahead of the endtime of a shard group that its successor
# group is created.
# advance-period = "30m"
### ###
### Controls the system self-monitoring, statistics and diagnostics. ### Controls the system self-monitoring, statistics and diagnostics.
### ###
### The internal database for monitoring data is created automatically if ### The internal database for monitoring data is created automatically if
### if it does not already exist. The target retention within this database ### if it does not already exist. The target retention within this database
### is called 'monitor' and is also created with a retention period of 7 days ### is called 'monitor' and is also created with a retention period of 7 days
### and a replication factor of 1, if it does not exist. In all cases the ### and a replication factor of 1, if it does not exist. In all cases the
### this retention policy is configured as the default for the database. ### this retention policy is configured as the default for the database.
[monitor] [monitor]
store-enabled = true # Whether to record statistics internally. # Whether to record statistics internally.
store-database = "_internal" # The destination database for recorded statistics # store-enabled = true
store-interval = "10s" # The interval at which to record statistics
### # The destination database for recorded statistics
### [admin] # store-database = "_internal"
###
### Controls the availability of the built-in, web-based admin interface. If HTTPS is
### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
###
[admin] # The interval at which to record statistics
enabled = true # store-interval = "10s"
bind-address = ":8083"
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"
### ###
### [http] ### [http]
### ###
### Controls how the HTTP endpoints are configured. These are the primary ### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB. ### mechanism for getting data into and out of InfluxDB.
### ###
[http] [http]
enabled = true # Determines whether HTTP endpoint is enabled.
bind-address = ":8086" # enabled = true
auth-enabled = false
log-enabled = true # Determines whether the Flux query endpoint is enabled.
write-tracing = false # flux-enabled = false
# Determines whether the Flux query logging is enabled.
# flux-log-enabled = false
# The bind address used by the HTTP service.
# bind-address = ":8086"
# Determines whether user authentication is enabled over HTTP/HTTPS.
# auth-enabled = false
# The default realm sent back when issuing a basic auth challenge.
# realm = "InfluxDB"
# Determines whether HTTP request logging is enabled.
# log-enabled = true
# Determines whether the HTTP write request logs should be suppressed when the log is enabled.
# suppress-write-log = false

# When HTTP request logging is enabled, this option specifies the path where
# log entries should be written. If unspecified, the default is to write to stderr, which
# intermingles HTTP logs with internal InfluxDB logging.
#
# If influxd is unable to access the specified path, it will log an error and fall back to writing
# the request log to stderr.
# access-log-path = ""

# Filters which requests should be logged. Each filter is of the pattern NNN, NNX, or NXX where N is
# a number and X is a wildcard for any number. To filter all 5xx responses, use the string 5xx.
# If multiple filters are used, then only one has to match. The default is to have no filters which
# will cause every request to be printed.
# access-log-status-filters = []
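For example, following the NNN/NNX/NXX pattern rules described above, a deployment could log only error responses; the filter values below are illustrative:

# within the [http] section: log only client (4xx) and server (5xx) error responses
access-log-status-filters = ["4xx", "5xx"]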
# Determines whether detailed write logging is enabled.
# write-tracing = false
# Determines whether the pprof endpoint is enabled. This endpoint is used for
# troubleshooting and monitoring.
# pprof-enabled = true
pprof-enabled = false pprof-enabled = false
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem" # Enables a pprof endpoint that binds to localhost:6060 immediately on startup.
# This is only needed to debug startup issues.
# debug-pprof-enabled = false
# Determines whether HTTPS is enabled.
# https-enabled = false
# The SSL certificate to use when HTTPS is enabled.
# https-certificate = "/etc/ssl/influxdb.pem"
# Use a separate private key location.
# https-private-key = ""
# The JWT auth shared secret to validate requests using JSON web tokens.
# shared-secret = ""
# The default chunk size for result sets that should be chunked.
# max-row-limit = 0
# The maximum number of HTTP connections that may be open at once. New connections that
# would exceed this limit are dropped. Setting this value to 0 disables the limit.
# max-connection-limit = 0
# Enable http service over unix domain socket
# unix-socket-enabled = false
# The path of the unix domain socket.
# bind-socket = "/var/run/influxdb.sock"
# The maximum size of a client request body, in bytes. Setting this value to 0 disables the limit.
# max-body-size = 25000000
# The maximum number of writes processed concurrently.
# Setting this to 0 disables the limit.
# max-concurrent-write-limit = 0
# The maximum number of writes queued for processing.
# Setting this to 0 disables the limit.
# max-enqueued-write-limit = 0
# The maximum duration for a write to wait in the queue to be processed.
# Setting this to 0 or setting max-concurrent-write-limit to 0 disables the limit.
# enqueued-write-timeout = 0
###
### [logging]
###
### Controls how the logger emits logs to the output.
###
[logging]
# Determines which log encoder to use for logs. Available options
# are auto, logfmt, and json. auto will use a more a more user-friendly
# output format if the output terminal is a TTY, but the format is not as
# easily machine-readable. When the output is a non-TTY, auto will use
# logfmt.
# format = "auto"
# Determines which level of logs will be emitted. The available levels
# are error, warn, info, and debug. Logs that are equal to or above the
# specified level will be emitted.
# level = "info"
# Suppresses the logo output that is printed when the program is started.
# The logo is always suppressed if STDOUT is not a TTY.
# suppress-logo = false
###
### [subscriber]
###
### Controls the subscriptions, which can be used to fork a copy of all data
### received by the InfluxDB host.
###
[subscriber]
# Determines whether the subscriber service is enabled.
# enabled = true
# The default timeout for HTTP writes to subscribers.
# http-timeout = "30s"
# Allows insecure HTTPS connections to subscribers. This is useful when testing with self-
# signed certificates.
# insecure-skip-verify = false

# The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used
# ca-certs = ""
# The number of writer goroutines processing the write channel.
# write-concurrency = 40
# The number of in-flight writes buffered in the write channel.
# write-buffer-size = 1000
### ###
### [[graphite]] ### [[graphite]]
### ###
### Controls one or many listeners for Graphite data. ### Controls one or many listeners for Graphite data.
### ###
[[graphite]] [[graphite]]
enabled = false # Determines whether the graphite endpoint is enabled.
# enabled = false
# database = "graphite" # database = "graphite"
# retention-policy = ""
# bind-address = ":2003" # bind-address = ":2003"
# protocol = "tcp" # protocol = "tcp"
# consistency-level = "one" # consistency-level = "one"
# name-separator = "."
# These next lines control how batching works. You should have this enabled # These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching # otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in. # will buffer points in memory if you have many coming in.
# batch-size = 1000 # will flush if this many points get buffered # Flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory # batch-size = 5000
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit # number of batches that may be pending in memory
# udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. # batch-pending = 10
# Flush at least this often even if we haven't hit buffer limit
## "name-schema" configures tag names for parsing the metric name from graphite protocol; # batch-timeout = "1s"
## separated by `name-separator`. # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
## The "measurement" tag is special and the corresponding field will become # udp-read-buffer = 0
## the name of the metric.
## e.g. "type.host.measurement.device" will parse "server.localhost.cpu.cpu0" as ### This string joins multiple matching 'measurement' values providing more control over the final measurement name.
## { # separator = "."
## measurement: "cpu",
## tags: { ### Default tags that will be added to all metrics. These can be overridden at the template level
## "type": "server", ### or by tags extracted from metric
## "host": "localhost, # tags = ["region=us-east", "zone=1c"]
## "device": "cpu0"
## } ### Each template line requires a template pattern. It can have an optional
## } ### filter before the template and separated by spaces. It can also have optional extra
# name-schema = "type.host.measurement.device" ### tags following the template. Multiple tags should be separated by commas and no spaces
## If set to true, when the input metric name has more fields than `name-schema` specified, ### similar to the line protocol format. There can be only one default template.
## the extra fields will be ignored. # templates = [
## Otherwise an error will be logged and the metric rejected. # "*.app env.service.resource.measurement",
# ignore-unnamed = true # # Default template
# "server.*",
# ]
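For readers comparing the two mechanisms, here is a hedged sketch of how the new template syntax would express what the old name-schema example documented; the listener values are illustrative and not taken from either file:

[[graphite]]
  enabled = true
  database = "graphite"
  bind-address = ":2003"
  # "type.host.measurement.device" parses "server.localhost.cpu.cpu0" into
  # measurement "cpu" with tags type=server, host=localhost, device=cpu0
  templates = [
    "type.host.measurement.device",
  ]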
### ###
### [collectd] ### [collectd]
### ###
### Controls the listener for collectd data. ### Controls one or many listeners for collectd data.
### ###
[collectd] [[collectd]]
enabled = false # enabled = false
# bind-address = "" # bind-address = ":25826"
# database = "" # database = "collectd"
# typesdb = "" # retention-policy = ""
#
# The collectd service supports either scanning a directory for multiple types
# db files, or specifying a single db file.
# typesdb = "/usr/local/share/collectd"
#
# security-level = "none"
# auth-file = "/etc/collectd/auth_file"
# These next lines control how batching works. You should have this enabled # These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching # otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in. # will buffer points in memory if you have many coming in.
# batch-size = 1000 # will flush if this many points get buffered # Flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory # batch-size = 5000
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit # Number of batches that may be pending in memory
# read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. # batch-pending = 10

# Flush at least this often even if we haven't hit buffer limit
# batch-timeout = "10s"

# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# read-buffer = 0

# Multi-value plugins can be handled two ways.
# "split" will parse and store the multi-value plugin data into separate measurements
# "join" will parse and store the multi-value plugin as a single multi-value measurement.
# "split" is the default behavior for backward compatibility with previous versions of influxdb.
# parse-multivalue-plugin = "split"
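For instance, to store a multi-value collectd plugin as a single measurement rather than splitting it, the option described above could be overridden (illustrative; neither file sets it):

parse-multivalue-plugin = "join"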
### ###
### [opentsdb] ### [opentsdb]
### ###
### Controls the listener for OpenTSDB data. ### Controls one or many listeners for OpenTSDB data.
### ###
[opentsdb] [[opentsdb]]
enabled = false # enabled = false
# bind-address = ":4242" # bind-address = ":4242"
# database = "opentsdb" # database = "opentsdb"
# retention-policy = "" # retention-policy = ""
# consistency-level = "one" # consistency-level = "one"
# tls-enabled = false # tls-enabled = false
# certificate= "" # certificate= "/etc/ssl/influxdb.pem"
# Log an error for every malformed point.
# log-point-errors = true
# These next lines control how batching works. You should have this enabled # These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Only points # otherwise you could get dropped metrics or poor performance. Only points
# metrics received over the telnet protocol undergo batching. # metrics received over the telnet protocol undergo batching.
# batch-size = 1000 # will flush if this many points get buffered # Flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory # batch-size = 1000
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit # Number of batches that may be pending in memory
# batch-pending = 5

# Flush at least this often even if we haven't hit buffer limit
# batch-timeout = "1s"
### ###
### [[udp]] ### [[udp]]
### ###
### Controls the listeners for InfluxDB line protocol data via UDP. ### Controls the listeners for InfluxDB line protocol data via UDP.
### ###
[[udp]] [[udp]]
enabled = false # enabled = false
# bind-address = "" # bind-address = ":8089"
# database = "udp" # database = "udp"
# retention-policy = "" # retention-policy = ""
# InfluxDB precision for timestamps on received points ("" or "n", "u", "ms", "s", "m", "h")
# precision = ""
# These next lines control how batching works. You should have this enabled # These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching # otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in. # will buffer points in memory if you have many coming in.
# batch-size = 1000 # will flush if this many points get buffered # Flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory # batch-size = 5000
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit # Number of batches that may be pending in memory
# read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. # batch-pending = 10

# Will flush at least this often even if we haven't hit buffer limit
# batch-timeout = "1s"

# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# read-buffer = 0
### ###
### [continuous_queries] ### [continuous_queries]
### ###
### Controls how continuous queries are run within InfluxDB. ### Controls how continuous queries are run within InfluxDB.
### ###
[continuous_queries] [continuous_queries]
log-enabled = true # Determines whether the continuous query service is enabled.
enabled = true # enabled = true
recompute-previous-n = 2
recompute-no-older-than = "10m" # Controls whether queries are logged when executed by the CQ service.
compute-runs-per-interval = 10 # log-enabled = true
compute-no-more-than = "2m"
# Controls whether queries are logged to the self-monitoring data store.
# query-stats-enabled = false
# interval for how often continuous queries will be checked if they need to run
# run-interval = "1s"
###
### [tls]
###
### Global configuration settings for TLS in InfluxDB.
###
[tls]
# Determines the available set of cipher suites. See https://golang.org/pkg/crypto/tls/#pkg-constants
# for a list of available ciphers, which depends on the version of Go (use the query
# SHOW DIAGNOSTICS to see the version of Go used to build InfluxDB). If not specified, uses
# the default settings from Go's crypto/tls package.
# ciphers = [
#   "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
#   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
# ]

# Minimum version of the tls protocol that will be negotiated. If not specified, uses the
# default settings from Go's crypto/tls package.
# min-version = "tls1.2"

# Maximum version of the tls protocol that will be negotiated. If not specified, uses the
# default settings from Go's crypto/tls package.
# max-version = "tls1.2"
End of changes. 34 change blocks. 212 lines changed or deleted, 462 lines changed or added.
