"Fossies" - the Fresh Open Source Software Archive

Member "swift-2.21.0/etc/proxy-server.conf-sample" (25 Mar 2019, 50538 Bytes) of package /linux/misc/openstack/swift-2.21.0.tar.gz:


    1 [DEFAULT]
    2 # bind_ip = 0.0.0.0
    3 bind_port = 8080
    4 # keep_idle = 600
    5 # bind_timeout = 30
    6 # backlog = 4096
    7 # swift_dir = /etc/swift
    8 # user = swift
    9 
   10 # Enables exposing configuration settings via HTTP GET /info.
   11 # expose_info = true
   12 
   13 # Key to use for admin calls that are HMAC signed.  Default is empty,
   14 # which will disable admin calls to /info.
   15 # admin_key = secret_admin_key
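      #
      # Illustrative sketch (not a setting): admin calls to /info are HMAC
      # signed with admin_key. The message format, query parameter names and
      # SHA-1 digest shown here are assumptions to verify against the Swift
      # documentation, e.g. from Python:
      #   import hmac, time
      #   from hashlib import sha1
      #   expires = int(time.time()) + 60
      #   sig = hmac.new(b'secret_admin_key',
      #                  ('GET\n%d\n/info' % expires).encode(), sha1).hexdigest()
      #   # then request /info?swiftinfo_expires=<expires>&swiftinfo_sig=<sig>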
   16 #
   17 # Allows withholding sections from showing up in the public calls
   18 # to /info.  You can withhold subsections by separating the dict level with a
   19 # ".".  The following would cause the sections 'container_quotas' and 'tempurl'
   20 # to not be listed, and the key max_failed_deletes would be removed from
   21 # bulk_delete.  Default value is 'swift.valid_api_versions' which allows all
   22 # registered features to be listed via HTTP GET /info except
   23 # swift.valid_api_versions information
   24 # disallowed_sections = swift.valid_api_versions, container_quotas, tempurl
   25 
   26 # Use an integer to override the number of pre-forked processes that will
   27 # accept connections.  Should default to the number of effective cpu
   28 # cores in the system.  It's worth noting that individual workers will
   29 # use many eventlet co-routines to service multiple concurrent requests.
   30 # workers = auto
   31 #
   32 # Maximum concurrent requests per worker
   33 # max_clients = 1024
   34 #
   35 # Set the following two lines to enable SSL. This is for testing only.
   36 # cert_file = /etc/swift/proxy.crt
   37 # key_file = /etc/swift/proxy.key
   38 #
   39 # expiring_objects_container_divisor = 86400
   40 # expiring_objects_account_name = expiring_objects
   41 #
   42 # You can specify default log routing here if you want:
   43 # log_name = swift
   44 # log_facility = LOG_LOCAL0
   45 # log_level = INFO
   46 # log_headers = false
   47 # log_address = /dev/log
   48 # The following caps the length of log lines to the value given; no limit if
   49 # set to 0, the default.
   50 # log_max_line_length = 0
   51 #
   52 # This optional suffix (default is empty) is appended to the swift transaction
   53 # id, allowing one to easily figure out to which cluster an X-Trans-Id belongs.
   54 # This is very useful when one is managing more than one swift cluster.
   55 # trans_id_suffix =
   56 #
   57 # comma separated list of functions to call to set up custom log handlers.
   58 # functions get passed: conf, name, log_to_console, log_route, fmt, logger,
   59 # adapted_logger
   60 # log_custom_handlers =
   61 #
   62 # If set, log_udp_host will override log_address
   63 # log_udp_host =
   64 # log_udp_port = 514
   65 #
   66 # You can enable StatsD logging here:
   67 # log_statsd_host =
   68 # log_statsd_port = 8125
   69 # log_statsd_default_sample_rate = 1.0
   70 # log_statsd_sample_rate_factor = 1.0
   71 # log_statsd_metric_prefix =
   72 #
   73 # List of origin hosts that are allowed for CORS requests in addition to what
   74 # the container has set.
   75 # Use a comma separated list of full URLs (http://foo.bar:1234,https://foo.bar)
   76 # cors_allow_origin =
   77 
   78 # If True (default) then CORS requests are only allowed if their Origin header
   79 # matches an allowed origin. Otherwise, any Origin is allowed.
   80 # strict_cors_mode = True
   81 #
   82 # Comma separated list of headers to expose through Access-Control-Expose-Headers,
   83 # in addition to the defaults and any headers set in container metadata (see
   84 # CORS documentation).
   85 # cors_expose_headers =
   86 #
   87 # client_timeout = 60
   88 # eventlet_debug = false
   89 #
   90 # You can set scheduling priority of processes. Niceness values range from -20
   91 # (most favorable to the process) to 19 (least favorable to the process).
   92 # nice_priority =
   93 #
   94 # You can set I/O scheduling class and priority of processes. I/O niceness
   95 # class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and
   96 # IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from
   97 # 0 to 7. The higher the value, the lower the I/O priority of the process.
   98 # Works only with ionice_class.
   99 # ionice_class =
  100 # ionice_priority =
  101 
  102 [pipeline:main]
  103 # This sample pipeline uses tempauth and is used for SAIO dev work and
  104 # testing. See below for a pipeline using keystone.
  105 pipeline = catch_errors gatekeeper healthcheck proxy-logging cache listing_formats container_sync bulk tempurl ratelimit tempauth copy container-quotas account-quotas slo dlo versioned_writes symlink proxy-logging proxy-server
  106 
  107 # The following pipeline shows keystone integration. Comment out the one
  108 # above and uncomment this one. Additional steps for integrating keystone are
  109 # covered further below in the filter sections for authtoken and keystoneauth.
  110 #pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth copy container-quotas account-quotas slo dlo versioned_writes symlink proxy-logging proxy-server
  111 
  112 [app:proxy-server]
  113 use = egg:swift#proxy
  114 # You can override the default log routing for this app here:
  115 # set log_name = proxy-server
  116 # set log_facility = LOG_LOCAL0
  117 # set log_level = INFO
  118 # set log_address = /dev/log
  119 #
  120 # When deployed behind a proxy, load balancer, or SSL terminator that is
  121 # configured to speak the human-readable (v1) PROXY protocol (see
  122 # http://www.haproxy.org/download/1.7/doc/proxy-protocol.txt), you should set
  123 # this option to true.  The proxy-server will populate the client connection
  124 # information using the PROXY protocol and reject any connection missing a
  125 # valid PROXY line with a 400.  Only v1 (human-readable) of the PROXY protocol
  126 # is supported.
  127 # require_proxy_protocol = false
  128 #
  129 # log_handoffs = true
  130 # recheck_account_existence = 60
  131 # recheck_container_existence = 60
  132 # object_chunk_size = 65536
  133 # client_chunk_size = 65536
  134 #
  135 # How long the proxy server will wait on responses from the a/c/o servers.
  136 # node_timeout = 10
  137 #
  138 # How long the proxy server will wait for an initial response and to read a
  139 # chunk of data from the object servers while serving GET / HEAD requests.
  140 # Timeouts from these requests can be recovered from, so setting this to
  141 # something lower than node_timeout would provide quicker error recovery
  142 # while allowing for a longer timeout for non-recoverable requests (PUTs).
  143 # Defaults to node_timeout, should be overridden if node_timeout is set to a
  144 # high number to prevent client timeouts from firing before the proxy server
  145 # has a chance to retry.
  146 # recoverable_node_timeout = node_timeout
  147 #
  148 # conn_timeout = 0.5
  149 #
  150 # How long to wait for requests to finish after a quorum has been established.
  151 # post_quorum_timeout = 0.5
  152 #
  153 # How long without an error before a node's error count is reset. This will
  154 # also be how long before a node is reenabled after suppression is triggered.
  155 # error_suppression_interval = 60
  156 #
  157 # How many errors can accumulate before a node is temporarily ignored.
  158 # error_suppression_limit = 10
  159 #
  160 # If set to 'true' any authorized user may create and delete accounts; if
  161 # 'false' no one, even authorized, can.
  162 # allow_account_management = false
  163 #
  164 # If set to 'true' authorized accounts that do not yet exist within the Swift
  165 # cluster will be automatically created.
  166 # account_autocreate = false
  167 #
  168 # If set to a positive value, trying to create a container when the account
  169 # already has at least this many containers will result in a 403 Forbidden.
  170 # Note: This is a soft limit, meaning a user might exceed the cap for
  171 # recheck_account_existence before the 403s kick in.
  172 # max_containers_per_account = 0
  173 #
  174 # This is a comma separated list of account hashes that ignore the
  175 # max_containers_per_account cap.
  176 # max_containers_whitelist =
  177 #
  178 # Comma separated list of Host headers to which the proxy will deny requests.
  179 # deny_host_headers =
  180 #
  181 # Prefix used when automatically creating accounts.
  182 # auto_create_account_prefix = .
  183 #
  184 # Depth of the proxy put queue.
  185 # put_queue_depth = 10
  186 #
  187 # During GET and HEAD requests, storage nodes can be chosen at random
  188 # (shuffle), by using timing measurements (timing), or by using an explicit
  189 # region/zone match (affinity). Using timing measurements may allow for lower
  190 # overall latency, while using affinity allows for finer control. In both the
  191 # timing and affinity cases, equally-sorting nodes are still randomly chosen to
  192 # spread load.
  193 # The valid values for sorting_method are "affinity", "shuffle", or "timing".
  194 # This option may be overridden in a per-policy configuration section.
  195 # sorting_method = shuffle
  196 #
  197 # If the "timing" sorting_method is used, the timings will only be valid for
  198 # the number of seconds configured by timing_expiry.
  199 # timing_expiry = 300
  200 #
  201 # By default on a GET/HEAD swift will connect to the storage nodes one at a time
  202 # in a single thread, though there is some intelligence in the order in which
  203 # they are tried. If you turn on concurrent_gets below, then replica count
  204 # threads will be used. In combination with the concurrency_timeout option this
  205 # allows swift to send out GET/HEAD requests to the storage nodes concurrently
  206 # and answer with the first to respond. With an EC policy the parameter only affects HEAD requests.
  207 # concurrent_gets = off
  208 #
  209 # This parameter controls how long to wait before firing off the next
  210 # concurrent_get thread. A value of 0 would be fully concurrent; any other
  211 # number will stagger the firing of the threads. This number should be
  212 # between 0 and node_timeout. The default is whatever you set for the
  213 # conn_timeout parameter.
  214 # concurrency_timeout = 0.5
  215 #
  216 # Set to the number of nodes to contact for a normal request. You can use
  217 # '* replicas' at the end to have it use the number given times the number of
  218 # replicas for the ring being used for the request.
  219 # request_node_count = 2 * replicas
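      #
      # For illustration: with a 3-replica ring, '2 * replicas' above means up
      # to 6 nodes would be contacted for a normal request.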
  220 #
  221 # Specifies which backend servers to prefer on reads. Format is a comma
  222 # separated list of affinity descriptors of the form <selection>=<priority>.
  223 # The <selection> may be r<N> for selecting nodes in region N or r<N>z<M> for
  224 # selecting nodes in region N, zone M. The <priority> value should be a whole
  225 # number that represents the priority to be given to the selection; lower
  226 # numbers are higher priority.
  227 #
  228 # Example: first read from region 1 zone 1, then region 1 zone 2, then
  229 # anything in region 2, then everything else:
  230 # read_affinity = r1z1=100, r1z2=200, r2=300
  231 # Default is empty, meaning no preference.
  232 # This option may be overridden in a per-policy configuration section.
  233 # read_affinity =
  234 #
  235 # Specifies which backend servers to prefer on object writes. Format is a comma
  236 # separated list of affinity descriptors of the form r<N> for region N or
  237 # r<N>z<M> for region N, zone M. If this is set, then when handling an object
  238 # PUT request, some number (see setting write_affinity_node_count) of local
  239 # backend servers will be tried before any nonlocal ones.
  240 #
  241 # Example: try to write to regions 1 and 2 before writing to any other
  242 # nodes:
  243 # write_affinity = r1, r2
  244 # Default is empty, meaning no preference.
  245 # This option may be overridden in a per-policy configuration section.
  246 # write_affinity =
  247 #
  248 # The number of local (as governed by the write_affinity setting) nodes to
  249 # attempt to contact first on writes, before any non-local ones. The value
  250 # should be an integer number, or use '* replicas' at the end to have it use
  251 # the number given times the number of replicas for the ring being used for the
  252 # request.
  253 # This option may be overridden in a per-policy configuration section.
  254 # write_affinity_node_count = 2 * replicas
  255 #
  256 # The number of local (as governed by the write_affinity setting) handoff nodes
  257 # to attempt to contact on deletion, in addition to primary nodes.
  258 #
  259 # Example: in a geographically distributed deployment with 2 regions, if
  260 # replicas=3, sometimes there may be 1 primary node and 2 local handoff nodes
  261 # in one region holding the object after upload but before the object has been
  262 # replicated to the appropriate locations in other regions. In this case,
  263 # including these handoff nodes in the delete request helps the proxy make the
  264 # correct decision for the response. The default value 'auto' means Swift will
  265 # calculate the number automatically as
  266 # (replicas - len(local_primary_nodes)). This option may be overridden in a
  267 # per-policy configuration section.
  268 # write_affinity_handoff_delete_count = auto
  269 #
  270 # These are the headers whose values will only be shown to swift_owners. The
  271 # exact definition of a swift_owner is up to the auth system in use, but
  272 # usually indicates administrative responsibilities.
  273 # swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control
  274 #
  275 # You can set scheduling priority of processes. Niceness values range from -20
  276 # (most favorable to the process) to 19 (least favorable to the process).
  277 # nice_priority =
  278 #
  279 # You can set I/O scheduling class and priority of processes. I/O niceness
  280 # class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and
  281 # IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from
  282 # 0 to 7. The higher the value, the lower the I/O priority of the process.
  283 # Works only with ionice_class.
  284 # ionice_class =
  285 # ionice_priority =
  286 
  287 # Some proxy-server configuration options may be overridden on a per-policy
  288 # basis by including per-policy config section(s). The value of any option
  289 # specified in a per-policy section will override any value given in the
  290 # proxy-server section for that policy only. Otherwise the value of these
  291 # options will be that specified in the proxy-server section.
  292 # The section name should refer to the policy index, not the policy name.
  293 # [proxy-server:policy:<policy index>]
  294 # sorting_method =
  295 # read_affinity =
  296 # write_affinity =
  297 # write_affinity_node_count =
  298 # write_affinity_handoff_delete_count =
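      #
      # For illustration only, a hypothetical per-policy override for policy
      # index 0 (the values are examples, not recommendations) might look like:
      # [proxy-server:policy:0]
      # sorting_method = affinity
      # read_affinity = r1z1=100, r1=200
      # write_affinity = r1
      # write_affinity_node_count = 2 * replicas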
  299 
  300 [filter:tempauth]
  301 use = egg:swift#tempauth
  302 # You can override the default log routing for this filter here:
  303 # set log_name = tempauth
  304 # set log_facility = LOG_LOCAL0
  305 # set log_level = INFO
  306 # set log_headers = false
  307 # set log_address = /dev/log
  308 #
  309 # The reseller prefix will verify a token begins with this prefix before even
  310 # attempting to validate it. Also, with authorization, only Swift storage
  311 # accounts with this prefix will be authorized by this middleware. Useful if
  312 # multiple auth systems are in use for one Swift cluster.
  313 # The reseller_prefix may contain a comma separated list of items. The first
  314 # item is used for the token as mentioned above. If second and subsequent
  315 # items exist, the middleware will handle authorization for an account with
  316 # that prefix. For example, for prefixes "AUTH, SERVICE", a path of
  317 # /v1/SERVICE_account is handled the same as /v1/AUTH_account. If an empty
  318 # (blank) reseller prefix is required, it must be first in the list. Two
  319 # single quote characters indicate an empty (blank) reseller prefix.
  320 # reseller_prefix = AUTH
  321 
  322 #
  323 # The require_group parameter names a group that must be presented by
  324 # either X-Auth-Token or X-Service-Token. Usually this parameter is
  325 # used only with multiple reseller prefixes (e.g., SERVICE_require_group=blah).
  326 # By default, no group is needed. Do not use .admin.
  327 # require_group =
  328 
  329 # The auth prefix will cause requests beginning with this prefix to be routed
  330 # to the auth subsystem, for granting tokens, etc.
  331 # auth_prefix = /auth/
  332 # token_life = 86400
  333 #
  334 # This allows middleware higher in the WSGI pipeline to override auth
  335 # processing, useful for middleware such as tempurl and formpost. If you know
  336 # you're not going to use such middleware and you want a bit of extra security,
  337 # you can set this to false.
  338 # allow_overrides = true
  339 #
  340 # This specifies what scheme to return with storage URLs:
  341 # http, https, or default (chooses based on what the server is running as)
  342 # This can be useful with an SSL load balancer in front of a non-SSL server.
  343 # storage_url_scheme = default
  344 #
  345 # Lastly, you need to list all the accounts/users you want here. The format is:
  346 #   user_<account>_<user> = <key> [group] [group] [...] [storage_url]
  347 # or if you want underscores in <account> or <user>, you can base64 encode them
  348 # (with no equal signs) and use this format:
  349 #   user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
  350 # There are special groups of:
  351 #   .reseller_admin = can do anything to any account for this auth
  352 #   .admin = can do anything within the account
  353 # If neither of these groups are specified, the user can only access containers
  354 # that have been explicitly allowed for them by a .admin or .reseller_admin.
  355 # The trailing optional storage_url allows you to specify an alternate url to
  356 # hand back to the user upon authentication. If not specified, this defaults to
  357 # $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
  358 # to what the requester would need to use to reach this host.
  359 # Here are example entries, required for running the tests:
  360 user_admin_admin = admin .admin .reseller_admin
  361 user_test_tester = testing .admin
  362 user_test_tester2 = testing2 .admin
  363 user_test_tester3 = testing3
  364 user_test2_tester2 = testing2 .admin
  365 user_test5_tester5 = testing5 service
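      #
      # For example, a hedged sketch of producing a user64_ entry for account
      # 'my_account' and user 'my_user' (assuming standard base64 with the
      # trailing '=' padding stripped, as described above):
      #   import base64
      #   enc = lambda s: base64.b64encode(s.encode()).decode().rstrip('=')
      #   print('user64_%s_%s = secretkey .admin' % (enc('my_account'), enc('my_user')))
      #   # -> user64_bXlfYWNjb3VudA_bXlfdXNlcg = secretkey .admin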
  366 
  367 # To enable Keystone authentication you first need to have the auth token
  368 # middleware configured. An example is shown below; please
  369 # refer to the keystone documentation for details about the
  370 # different settings.
  371 #
  372 # You'll also need to have the keystoneauth middleware enabled and have it in
  373 # your main pipeline, as shown in the sample pipeline at the top of this file.
  374 #
  375 # The following parameters are known to work with keystonemiddleware v2.3.0
  376 # (above v2.0.0), but checking the latest information in the documentation
  377 # page[1] is recommended.
  378 # 1. https://docs.openstack.org/keystonemiddleware/latest/middlewarearchitecture.html#configuration
  379 #
  380 # [filter:authtoken]
  381 # paste.filter_factory = keystonemiddleware.auth_token:filter_factory
  382 # www_authenticate_uri = http://keystonehost:5000
  383 # auth_url = http://keystonehost:35357
  384 # auth_plugin = password
  385 # The following credentials must match the Keystone credentials for the Swift
  386 # service and may need to be changed to match your Keystone configuration. The
  387 # example values shown here assume a user named 'swift' with admin role on a
  388 # project named 'service', both being in the Keystone domain with id 'default'.
  389 # Refer to the keystonemiddleware documentation link above [1] for other
  390 # examples.
  391 # project_domain_id = default
  392 # user_domain_id = default
  393 # project_name = service
  394 # username = swift
  395 # password = password
  396 #
  397 # delay_auth_decision defaults to False, but leaving it as False will
  398 # prevent other auth systems, staticweb, tempurl, formpost, and ACLs from
  399 # working; to use any of those, this value must be explicitly set to True.
  400 # delay_auth_decision = False
  401 #
  402 # cache = swift.cache
  403 # include_service_catalog = False
  404 #
  405 # [filter:keystoneauth]
  406 # use = egg:swift#keystoneauth
  407 # The reseller_prefix option lists account namespaces that this middleware is
  408 # responsible for. The prefix is placed before the Keystone project id.
  409 # For example, for project 12345678, and prefix AUTH, the account is
  410 # named AUTH_12345678 (i.e., path is /v1/AUTH_12345678/...).
  411 # Several prefixes are allowed by specifying a comma-separated list
  412 # as in: "reseller_prefix = AUTH, SERVICE". The empty string indicates a
  413 # single blank/empty prefix. If an empty prefix is required in a list of
  414 # prefixes, a value of '' (two single quote characters) indicates a
  415 # blank/empty prefix. Except for the blank/empty prefix, an underscore ('_')
  416 # character is appended to the value unless already present.
  417 # reseller_prefix = AUTH
  418 #
  419 # The user must have at least one role named by operator_roles on a
  420 # project in order to create, delete and modify containers and objects
  421 # and to set and read privileged headers such as ACLs.
  422 # If there are several reseller prefix items, you can prefix the
  423 # parameter so it applies only to those accounts (for example
  424 # the parameter SERVICE_operator_roles applies to the /v1/SERVICE_<project>
  425 # path). If you omit the prefix, the option applies to all reseller
  426 # prefix items. For the blank/empty prefix, prefix with '' (do not put
  427 # underscore after the two single quote characters).
  428 # operator_roles = admin, swiftoperator
  429 #
  430 # The reseller admin role has the ability to create and delete accounts
  431 # reseller_admin_role = ResellerAdmin
  432 #
  433 # This allows middleware higher in the WSGI pipeline to override auth
  434 # processing, useful for middleware such as tempurl and formpost. If you know
  435 # you're not going to use such middleware and you want a bit of extra security,
  436 # you can set this to false.
  437 # allow_overrides = true
  438 #
  439 # If the service_roles parameter is present, an X-Service-Token must be
  440 # present in the request that when validated, grants at least one role listed
  441 # in the parameter. The X-Service-Token may be scoped to any project.
  442 # If there are several reseller prefix items, you can prefix the
  443 # parameter so it applies only to those accounts (for example
  444 # the parameter SERVICE_service_roles applies to the /v1/SERVICE_<project>
  445 # path). If you omit the prefix, the option applies to all reseller
  446 # prefix items. For the blank/empty prefix, prefix with '' (do not put
  447 # underscore after the two single quote characters).
  448 # By default, no service_roles are required.
  449 # service_roles =
  450 #
  451 # For backwards compatibility, keystoneauth will match names in cross-tenant
  452 # access control lists (ACLs) when both the requesting user and the tenant
  453 # are in the default domain, i.e. the domain to which existing tenants are
  454 # migrated. The default_domain_id value configured here should be the same as
  455 # the value used during migration of tenants to keystone domains.
  456 # default_domain_id = default
  457 #
  458 # For a new installation, or an installation in which keystone projects may
  459 # move between domains, you should disable backwards compatible name matching
  460 # in ACLs by setting allow_names_in_acls to false:
  461 # allow_names_in_acls = true
  462 
  463 [filter:s3api]
  464 use = egg:swift#s3api
  465 
  466 # s3api setup:
  467 #
  468 # With either tempauth or your custom auth:
  469 # - Put s3api just before your auth filter(s) in the pipeline
  470 # With keystone:
  471 # - Put s3api and s3token before keystoneauth in the pipeline
  472 #
  473 # Swift has no concept of S3's resource owner; the resources
  474 # (i.e. containers and objects) created via the Swift API have no owner
  475 # information. This option specifies how the s3api middleware handles them
  476 # with the S3 API.  If this option is 'false', such kinds of resources will be
  477 # invisible and no users can access them with the S3 API.  If set to 'true',
  478 # a resource without an owner belongs to everyone and everyone can access it
  479 # with the S3 API.  If you care about S3 compatibility, set 'false' here.  This
  480 # option makes sense only when the s3_acl option is set to 'true' and your
  481 # Swift cluster has the resources created via the Swift API.
  482 # allow_no_owner = false
  483 #
  484 # Set a region name of your Swift cluster.  Note that s3api does not actually
  485 # place newly created buckets in this region.  This value is used for the
  486 # GET Bucket location API and v4 signature calculation.
  487 # location = us-east-1
  488 #
  489 # Set whether to enforce DNS-compliant bucket names. Note that S3 enforces
  490 # these conventions in all regions except the US Standard region.
  491 # dns_compliant_bucket_names = True
  492 #
  493 # Set the default maximum number of objects returned in the GET Bucket
  494 # response.
  495 # max_bucket_listing = 1000
  496 #
  497 # Set the maximum number of parts returned in the List Parts operation.
  498 # (default: 1000, the same as the S3 specification)
  499 # If setting it larger than 10000 (the swift container_listing_limit default),
  500 # make sure you also increase the container_listing_limit in swift.conf.
  501 # max_parts_listing = 1000
  502 #
  503 # Set the maximum number of objects we can delete with the Multi-Object Delete
  504 # operation.
  505 # max_multi_delete_objects = 1000
  506 #
  507 # Set the number of objects to delete at a time with the Multi-Object Delete
  508 # operation.
  509 # multi_delete_concurrency = 2
  510 #
  511 # If set to 'true', s3api uses its own metadata for ACLs
  512 # (e.g. X-Container-Sysmeta-S3Api-Acl) to achieve the best S3 compatibility.
  513 # If set to 'false', s3api tries to use Swift ACLs (e.g. X-Container-Read)
  514 # instead of S3 ACLs as far as possible.
  515 # There are some caveats that one should know about this setting. Firstly,
  516 # if it is set to 'false' after previously being set to 'true', any objects or
  517 # containers stored while the setting was 'true' will be accessible to all
  518 # users, because the S3 ACLs will be ignored under the s3_acl=False setting.
  519 # Secondly, s3_acl=True mode does not keep ACL consistency between the S3 and
  520 # Swift APIs; with s3_acl enabled, S3 ACLs only affect objects and buckets
  521 # accessed via the S3 API. This ACL information will not be available via the
  522 # Swift API, and so the ACLs will not be applied there.
  523 # Note that s3_acl currently supports only keystone and tempauth.
  524 # DON'T USE THIS in production before testing it thoroughly for your use cases.
  525 # This feature is still under development and it might cause something
  526 # you don't expect.
  527 # s3_acl = false
  528 #
  529 # Specify a host name of your Swift cluster.  This enables virtual-hosted style
  530 # requests.
  531 # storage_domain =
  532 #
  533 # Enable pipeline order check for SLO, s3token, authtoken, keystoneauth
  534 # according to standard s3api/Swift construction using either tempauth or
  535 # keystoneauth. If the order is incorrect, it raises an exception to stop the
  536 # proxy. Turn auth_pipeline_check off only when you want to bypass these
  537 # authentication middlewares in order to use other 3rd party (or your
  538 # proprietary) authentication middleware.
  539 # auth_pipeline_check = True
  540 #
  541 # Enable multi-part uploads. (default: true)
  542 # This is required to store files larger than Swift's max_file_size (by
  543 # default, 5GiB). Note that this has performance implications when deleting
  544 # objects, as we now have to check whether there are also segments to delete.
  545 # allow_multipart_uploads = True
  546 #
  547 # Set the maximum number of parts for the Upload Part operation. (default: 1000)
  548 # When setting it larger than the default value in order to match the
  549 # S3 specification (10000), also set a larger max_manifest_segments for the slo
  550 # middleware.
  551 # max_upload_part_num = 1000
  552 #
  553 # Enable returning only buckets whose owner is the user who requested the
  554 # GET Service operation. (default: false)
  555 # If you want to enable the above feature, set both this and s3_acl to true.
  556 # That might cause significant performance degradation, so set this to true
  557 # only if your service absolutely needs this feature.
  558 # If you set this to false, s3api returns all buckets.
  559 # check_bucket_owner = false
  560 #
  561 # By default, Swift reports only S3 style access logs
  562 # (e.g. PUT /bucket/object). If force_swift_request_proxy_log is set
  563 # to 'true', Swift will also output Swift style logs
  564 # (e.g. PUT /v1/account/container/object) in addition to the S3 style logs.
  565 # Note that requests will then be reported twice (i.e. s3api does not
  566 # deduplicate them) and the Swift style logs will also include the various
  567 # subrequests issued to achieve S3 compatibility when
  568 # force_swift_request_proxy_log is set to 'true'.
  569 # force_swift_request_proxy_log = false
  570 #
  571 # The AWS S3 documentation says that each part of a multipart upload must be
  572 # at least 5 MB, except the last part.
  573 # min_segment_size = 5242880
  574 
  575 # You can override the default log routing for this filter here:
  576 # log_name = s3api
  577 
  578 [filter:s3token]
  579 # s3token middleware authenticates with keystone using the s3 credentials
  580 # provided in the request header. Please put s3token between s3api
  581 # and keystoneauth if you're using keystoneauth.
  582 use = egg:swift#s3token
  583 
  584 # Prefix that will be prepended to the tenant to form the account
  585 reseller_prefix = AUTH_
  586 
  587 # By default, s3token will reject all invalid S3-style requests. Set this to
  588 # True to delegate that decision to downstream WSGI components. This may be
  589 # useful if there are multiple auth systems in the proxy pipeline.
  590 delay_auth_decision = False
  591 
  592 # Keystone server details. Note that this differs from how swift3 was
  593 # configured: in particular, the Keystone API version must be included.
  594 auth_uri = http://keystonehost:35357/v3
  595 
  596 # Connect/read timeout to use when communicating with Keystone
  597 http_timeout = 10.0
  598 
  599 # SSL-related options
  600 # insecure = False
  601 # certfile =
  602 # keyfile =
  603 
  604 # You can override the default log routing for this filter here:
  605 # log_name = s3token
  606 
  607 [filter:healthcheck]
  608 use = egg:swift#healthcheck
  609 # An optional filesystem path which, if present, will cause the healthcheck
  610 # URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
  611 # This facility may be used to temporarily remove a Swift node from a load
  612 # balancer pool during maintenance or upgrade (remove the file to allow the
  613 # node back into the load balancer pool).
  614 # disable_path =
  615 
  616 [filter:cache]
  617 use = egg:swift#memcache
  618 # You can override the default log routing for this filter here:
  619 # set log_name = cache
  620 # set log_facility = LOG_LOCAL0
  621 # set log_level = INFO
  622 # set log_headers = false
  623 # set log_address = /dev/log
  624 #
  625 # If not set here, the value for memcache_servers will be read from
  626 # memcache.conf (see memcache.conf-sample) or lacking that file, it will
  627 # default to the value below. You can specify multiple servers separated with
  628 # commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must
  629 # follow rfc3986 section-3.2.2, i.e. [::1]:11211)
  630 # memcache_servers = 127.0.0.1:11211
  631 #
  632 # Sets how memcache values are serialized and deserialized:
  633 # 0 = older, insecure pickle serialization
  634 # 1 = json serialization but pickles can still be read (still insecure)
  635 # 2 = json serialization only (secure and the default)
  636 # If not set here, the value for memcache_serialization_support will be read
  637 # from /etc/swift/memcache.conf (see memcache.conf-sample).
  638 # To avoid an instant full cache flush, existing installations should
  639 # upgrade with 0, then set to 1 and reload, then after some time (24 hours)
  640 # set to 2 and reload.
  641 # In the future, the ability to use pickle serialization will be removed.
  642 # memcache_serialization_support = 2
  643 #
  644 # Sets the maximum number of connections to each memcached server per worker
  645 # memcache_max_connections = 2
  646 #
  647 # More options documented in memcache.conf-sample
  648 
  649 [filter:ratelimit]
  650 use = egg:swift#ratelimit
  651 # You can override the default log routing for this filter here:
  652 # set log_name = ratelimit
  653 # set log_facility = LOG_LOCAL0
  654 # set log_level = INFO
  655 # set log_headers = false
  656 # set log_address = /dev/log
  657 #
  658 # clock_accuracy should represent how accurate the proxy servers' system clocks
  659 # are with each other. 1000 means that all the proxies' clocks are accurate to
  660 # each other within 1 millisecond.  No ratelimit should be higher than the
  661 # clock accuracy.
  662 # clock_accuracy = 1000
  663 #
  664 # max_sleep_time_seconds = 60
  665 #
  666 # log_sleep_time_seconds of 0 means disabled
  667 # log_sleep_time_seconds = 0
  668 #
  669 # allows for slow rates (e.g. running up to 5 seconds behind) to catch up.
  670 # rate_buffer_seconds = 5
  671 #
  672 # account_ratelimit of 0 means disabled
  673 # account_ratelimit = 0
  674 
  675 # DEPRECATED- these will continue to work but will be replaced
  676 # by the X-Account-Sysmeta-Global-Write-Ratelimit flag.
  677 # Please see ratelimiting docs for details.
  678 # these are comma separated lists of account names
  679 # account_whitelist = a,b
  680 # account_blacklist = c,d
  681 
  682 # With container_ratelimit_x = r,
  683 # containers of size x are limited to r write requests per second.  The container
  684 # rate will be linearly interpolated from the values given. With the values
  685 # below, a container of size 5 will get a rate of 75.
  686 # container_ratelimit_0 = 100
  687 # container_ratelimit_10 = 50
  688 # container_ratelimit_50 = 20
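      #
      # Illustration (not a setting), assuming simple linear interpolation
      # between the two surrounding configured sizes, of how the size-5 rate of
      # 75 mentioned above falls out:
      #   # rate(5), between container_ratelimit_0 = 100 and container_ratelimit_10 = 50
      #   rate = 100 + (50 - 100) * (5 - 0) / (10 - 0)   # = 100 - 25 = 75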
  689 
  690 # Similarly to the above container-level write limits, the following will limit
  691 # container GET (listing) requests.
  692 # container_listing_ratelimit_0 = 100
  693 # container_listing_ratelimit_10 = 50
  694 # container_listing_ratelimit_50 = 20
  695 
  696 [filter:read_only]
  697 use = egg:swift#read_only
  698 # Set read_only to true to turn global read-only mode on
  699 # read_only = false
  700 # Set allow_deletes to true to allow deletes
  701 # allow_deletes = false
  702 # Note: Put after ratelimit in the pipeline.
  703 
  704 [filter:domain_remap]
  705 use = egg:swift#domain_remap
  706 # You can override the default log routing for this filter here:
  707 # set log_name = domain_remap
  708 # set log_facility = LOG_LOCAL0
  709 # set log_level = INFO
  710 # set log_headers = false
  711 # set log_address = /dev/log
  712 #
  713 # Specify the storage_domain that matches your cloud; multiple domains
  714 # can be specified, separated by commas
  715 # storage_domain = example.com
  716 
  717 # Specify a root path part that will be added to the start of paths if not
  718 # already present.
  719 # path_root = v1
  720 
  721 # Browsers can convert a host header to lowercase, so check that the reseller
  722 # prefix on the account is the correct case. This is done by comparing the
  723 # items in the reseller_prefixes config option to the found prefix. If they
  724 # match except for case, the item from reseller_prefixes will be used
  725 # instead of the found reseller prefix. When none match, the default reseller
  726 # prefix is used. When no default reseller prefix is configured, any request
  727 # with an account prefix not in that list will be ignored by this middleware.
  728 # reseller_prefixes = AUTH
  729 # default_reseller_prefix =
  730 
  731 # Enable legacy remapping behavior for versioned path requests:
  732 #   c.a.example.com/v1/o -> /v1/AUTH_a/c/o
  733 # instead of
  734 #   c.a.example.com/v1/o -> /v1/AUTH_a/c/v1/o
  735 # ... by default all path parts after a remapped domain are considered part of
  736 # the object name with no special case for the path "v1"
  737 # mangle_client_paths = False
  738 
  739 [filter:catch_errors]
  740 use = egg:swift#catch_errors
  741 # You can override the default log routing for this filter here:
  742 # set log_name = catch_errors
  743 # set log_facility = LOG_LOCAL0
  744 # set log_level = INFO
  745 # set log_headers = false
  746 # set log_address = /dev/log
  747 
  748 [filter:cname_lookup]
  749 # Note: this middleware requires python-dnspython
  750 use = egg:swift#cname_lookup
  751 # You can override the default log routing for this filter here:
  752 # set log_name = cname_lookup
  753 # set log_facility = LOG_LOCAL0
  754 # set log_level = INFO
  755 # set log_headers = false
  756 # set log_address = /dev/log
  757 #
  758 # Specify the storage_domain that matches your cloud; multiple domains
  759 # can be specified, separated by commas
  760 # storage_domain = example.com
  761 #
  762 # lookup_depth = 1
  763 #
  764 # Specify the nameservers to use to do the CNAME resolution. If unset, the
  765 # system configuration is used. Multiple nameservers can be specified
  766 # separated by a comma. Default port 53 can be overridden. IPv6 is accepted.
  767 # Example: 127.0.0.1, 127.0.0.2, 127.0.0.3:5353, [::1], [::1]:5353
  768 # nameservers =
  769 
  770 # Note: Put staticweb just after your auth filter(s) in the pipeline
  771 [filter:staticweb]
  772 use = egg:swift#staticweb
  773 # You can override the default log routing for this filter here:
  774 # set log_name = staticweb
  775 # set log_facility = LOG_LOCAL0
  776 # set log_level = INFO
  777 # set log_headers = false
  778 # set log_address = /dev/log
  779 #
  780 # At times when it's impossible for staticweb to guess the outside
  781 # endpoint correctly, the url_base may be used to supply the URL
  782 # scheme and/or the host name (and port number) in order to generate
  783 # redirects.
  784 # Example values:
  785 #    http://www.example.com    - redirect to www.example.com
  786 #    https:                    - changes the scheme only
  787 #    https://                  - same, changes the scheme only
  788 #    //www.example.com:8080    - redirect www.example.com on port 8080
  789 #                                (scheme unchanged)
  790 # url_base =
  791 
  792 # Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline
  793 [filter:tempurl]
  794 use = egg:swift#tempurl
  795 # The methods allowed with Temp URLs.
  796 # methods = GET HEAD PUT POST DELETE
  797 #
  798 # The headers to remove from incoming requests. Simply a whitespace delimited
  799 # list of header names; names can optionally end with '*' to indicate a
  800 # prefix match. incoming_allow_headers is a list of exceptions to these
  801 # removals.
  802 # incoming_remove_headers = x-timestamp
  803 #
  804 # The headers allowed as exceptions to incoming_remove_headers. Simply a
  805 # whitespace delimited list of header names; names can optionally end with
  806 # '*' to indicate a prefix match.
  807 # incoming_allow_headers =
  808 #
  809 # The headers to remove from outgoing responses. Simply a whitespace delimited
  810 # list of header names; names can optionally end with '*' to indicate a
  811 # prefix match. outgoing_allow_headers is a list of exceptions to these
  812 # removals.
  813 # outgoing_remove_headers = x-object-meta-*
  814 #
  815 # The headers allowed as exceptions to outgoing_remove_headers. Simply a
  816 # whitespace delimited list of header names; names can optionally end with
  817 # '*' to indicate a prefix match.
  818 # outgoing_allow_headers = x-object-meta-public-*
  819 #
  820 # The digest algorithm(s) supported for generating signatures;
  821 # whitespace-delimited.
  822 # allowed_digests = sha1 sha256 sha512
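      #
      # For illustration (client-side, not a setting): a temp URL signature is
      # an HMAC over the method, expiry and path, using one of the allowed
      # digests and the account's or container's temp URL key. A minimal sketch
      # assuming a hypothetical key 'mykey':
      #   import hmac
      #   from hashlib import sha256
      #   from time import time
      #   expires = int(time() + 3600)
      #   path = '/v1/AUTH_account/container/object'
      #   sig = hmac.new(b'mykey', ('GET\n%d\n%s' % (expires, path)).encode(),
      #                  sha256).hexdigest()
      #   # then request <path>?temp_url_sig=<sig>&temp_url_expires=<expires>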
  823 
  824 # Note: Put formpost just before your auth filter(s) in the pipeline
  825 [filter:formpost]
  826 use = egg:swift#formpost
  827 
  828 # Note: Just needs to be placed before the proxy-server in the pipeline.
  829 [filter:name_check]
  830 use = egg:swift#name_check
  831 # forbidden_chars = '"`<>
  832 # maximum_length = 255
  833 # forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
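      #
      # For illustration: the forbidden_regexp above rejects names that contain
      # '.' or '..' as a path segment, e.g. 'cont/./obj' or a name ending in '/..'.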
  834 
  835 [filter:list-endpoints]
  836 use = egg:swift#list_endpoints
  837 # list_endpoints_path = /endpoints/
  838 
  839 [filter:proxy-logging]
  840 use = egg:swift#proxy_logging
  841 # If not set, the logging directives from [DEFAULT] (without the "access_" prefix) will be used
  842 # access_log_name = swift
  843 # access_log_facility = LOG_LOCAL0
  844 # access_log_level = INFO
  845 # access_log_address = /dev/log
  846 #
  847 # If set, access_log_udp_host will override access_log_address
  848 # access_log_udp_host =
  849 # access_log_udp_port = 514
  850 #
  851 # You can use log_statsd_* from [DEFAULT] or override them here:
  852 # access_log_statsd_host =
  853 # access_log_statsd_port = 8125
  854 # access_log_statsd_default_sample_rate = 1.0
  855 # access_log_statsd_sample_rate_factor = 1.0
  856 # access_log_statsd_metric_prefix =
  857 # access_log_headers = false
  858 #
  859 # If access_log_headers is True and access_log_headers_only is set, only
  860 # these headers are logged. Multiple headers can be defined as a comma separated
  861 # list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
  862 # access_log_headers_only =
  863 #
  864 # By default, the X-Auth-Token is logged. To obscure the value,
  865 # set reveal_sensitive_prefix to the number of characters to log.
  866 # For example, if set to 12, only the first 12 characters of the
  867 # token appear in the log. An unauthorized access of the log file
  868 # won't allow unauthorized usage of the token. However, the first
  869 # 12 or so characters are unique enough that you can trace/debug
  870 # token usage. Set to 0 to suppress the token completely (replaced
  871 # by '...' in the log).
  872 # Note: reveal_sensitive_prefix will not affect the value
  873 # logged with access_log_headers=True.
  874 # reveal_sensitive_prefix = 16
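      #
      # For illustration: with reveal_sensitive_prefix = 12, a hypothetical
      # token value AUTH_tk1234567890abcdef would be logged as AUTH_tk12345...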
  875 #
  876 # What HTTP methods are allowed for StatsD logging (comma-sep); request methods
  877 # not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
  878 # log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
  879 #
  880 # Note: The double proxy-logging in the pipeline is not a mistake. The
  881 # left-most proxy-logging is there to log requests that were handled in
  882 # middleware and never made it through to the right-most middleware (and
  883 # proxy server). Double logging is prevented for normal requests. See
  884 # proxy-logging docs.
  885 
  886 # Note: Put before both ratelimit and auth in the pipeline.
  887 [filter:bulk]
  888 use = egg:swift#bulk
  889 # max_containers_per_extraction = 10000
  890 # max_failed_extractions = 1000
  891 # max_deletes_per_request = 10000
  892 # max_failed_deletes = 1000
  893 #
  894 # In order to keep a connection active during a potentially long bulk request,
  895 # Swift may return whitespace prepended to the actual response body. This
  896 # whitespace will be yielded no more than every yield_frequency seconds.
  897 # yield_frequency = 10
  898 #
  899 # Note: The following parameter is used during a bulk delete of objects and
  900 # their container. Such deletes would frequently fail because it is very likely
  901 # that not all replicated objects have been deleted by the time the middleware
  902 # got a successful response. The number of retries can be configured here; the
  903 # number of seconds to wait between each retry will be 1.5**retry
  904 # delete_container_retry_count = 0
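      #
      # Illustration (assuming retries are numbered starting at 1): the waits
      # of 1.5**retry would be 1.5, 2.25, 3.375, ... seconds between retries.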
  905 #
  906 # To speed up the bulk delete process, multiple deletes may be executed in
  907 # parallel. Avoid setting this too high, as it gives clients a force multiplier
  908 # which may be used in DoS attacks. The suggested range is between 2 and 10.
  909 # delete_concurrency = 2
  910 
  911 # Note: Put after auth and staticweb in the pipeline.
  912 [filter:slo]
  913 use = egg:swift#slo
  914 # max_manifest_segments = 1000
  915 # max_manifest_size = 8388608
  916 #
  917 # Rate limiting applies only to segments smaller than this size (bytes).
  918 # rate_limit_under_size = 1048576
  919 #
  920 # Start rate-limiting SLO segment serving after the Nth small segment of a
  921 # segmented object.
  922 # rate_limit_after_segment = 10
  923 #
  924 # Once segment rate-limiting kicks in for an object, limit segments served
  925 # to N per second. 0 means no rate-limiting.
  926 # rate_limit_segments_per_sec = 1
  927 #
  928 # Time limit on GET requests (seconds)
  929 # max_get_time = 86400
  930 #
  931 # When creating an SLO, multiple segment validations may be executed in
  932 # parallel. Further, multiple deletes may be executed in parallel when deleting
  933 # with ?multipart-manifest=delete. Use this setting to limit how many
  934 # subrequests may be executed concurrently. Avoid setting it too high, as it
  935 # gives clients a force multiplier which may be used in DoS attacks. The
  936 # suggested range is between 2 and 10.
  937 # concurrency = 2
  938 #
  939 # This may be used to separately tune validation and delete concurrency values.
  940 # Default is to use the concurrency value from above; all of the same caveats
  941 # apply regarding recommended ranges.
  942 # delete_concurrency = 2
  943 #
  944 # In order to keep a connection active during a potentially long PUT request,
  945 # clients may request that Swift send whitespace ahead of the final response
  946 # body. This whitespace will be yielded at most every yield_frequency seconds.
  947 # yield_frequency = 10
  948 
  949 # Note: Put after auth and staticweb in the pipeline.
  950 # If you don't put it in the pipeline, it will be inserted for you.
  951 [filter:dlo]
  952 use = egg:swift#dlo
  953 # Start rate-limiting DLO segment serving after the Nth segment of a
  954 # segmented object.
  955 # rate_limit_after_segment = 10
  956 #
  957 # Once segment rate-limiting kicks in for an object, limit segments served
  958 # to N per second. 0 means no rate-limiting.
  959 # rate_limit_segments_per_sec = 1
  960 #
  961 # Time limit on GET requests (seconds)
  962 # max_get_time = 86400
  963 
  964 # Note: Put after auth in the pipeline.
  965 [filter:container-quotas]
  966 use = egg:swift#container_quotas
  967 
  968 # Note: Put after auth in the pipeline.
  969 [filter:account-quotas]
  970 use = egg:swift#account_quotas
  971 
  972 [filter:gatekeeper]
  973 use = egg:swift#gatekeeper
  974 # Set this to false if you want to allow clients to set arbitrary X-Timestamps
  975 # on uploaded objects. This may be used to preserve timestamps when migrating
  976 # from a previous storage system, but risks allowing users to upload
  977 # difficult-to-delete data.
  978 # shunt_inbound_x_timestamp = true
  979 #
  980 # You can override the default log routing for this filter here:
  981 # set log_name = gatekeeper
  982 # set log_facility = LOG_LOCAL0
  983 # set log_level = INFO
  984 # set log_headers = false
  985 # set log_address = /dev/log
  986 
  987 [filter:container_sync]
  988 use = egg:swift#container_sync
  989 # Set this to false if you want to disallow any full URL values to be set for
  990 # any new X-Container-Sync-To headers. This will keep any new full URLs from
  991 # coming in, but won't change any existing values already in the cluster.
  992 # Updating those will have to be done manually, as the true realm
  993 # endpoint cannot always be guessed.
  994 # allow_full_urls = true
  995 # Set this to specify this cluster's //realm/cluster as "current" in /info
  996 # current = //REALM/CLUSTER
  997 
  998 # Note: Put it at the beginning of the pipeline to profile all middleware. But
  999 # it is safer to put this after catch_errors, gatekeeper and healthcheck.
 1000 [filter:xprofile]
 1001 use = egg:swift#xprofile
 1002 # This option enables you to switch profilers; the profiler should inherit from
 1003 # the python standard profiler. Currently the supported values are 'cProfile',
 1004 # 'eventlet.green.profile', etc.
 1005 # profile_module = eventlet.green.profile
 1006 #
 1007 # This prefix will be used to combine process ID and timestamp to name the
 1008 # profile data file.  Make sure the executing user has permission to write
 1009 # into this path (missing path segments will be created, if necessary).
 1010 # If you enable profiling in more than one type of daemon, you must override
 1011 # it with a unique value like: /var/log/swift/profile/proxy.profile
 1012 # log_filename_prefix = /tmp/log/swift/profile/default.profile
 1013 #
 1014 # The profile data will be dumped to local disk based on the above naming rule
 1015 # at this interval.
 1016 # dump_interval = 5.0
 1017 #
 1018 # Be careful: this option will make the profiler dump data into timestamped
 1019 # files, which means lots of files will pile up in the directory.
 1020 # dump_timestamp = false
 1021 #
 1022 # This is the path of the URL to access the mini web UI.
 1023 # path = /__profile__
 1024 #
 1026 # Clear the data when the wsgi server shuts down.
 1026 # flush_at_shutdown = false
 1027 #
 1028 # unwind the iterator of applications
 1029 # unwind = false
 1030 
 1031 # Note: Put after slo, dlo in the pipeline.
 1032 # If you don't put it in the pipeline, it will be inserted automatically.
 1033 [filter:versioned_writes]
 1034 use = egg:swift#versioned_writes
 1035 # Enables using versioned writes middleware and exposing configuration
 1036 # settings via HTTP GET /info.
 1037 # WARNING: Setting this option bypasses the "allow_versions" option
 1038 # in the container configuration file, which will eventually be
 1039 # deprecated. See documentation for more details.
 1040 # allow_versioned_writes = false
 1041 
 1042 # Note: Put after auth and before dlo and slo middlewares.
 1043 # If you don't put it in the pipeline, it will be inserted for you.
 1044 [filter:copy]
 1045 use = egg:swift#copy
 1046 
 1047 # Note: To enable encryption, add the following 2 dependent pieces of crypto
 1048 # middleware to the proxy-server pipeline. They should be to the right of all
 1049 # other middleware apart from the final proxy-logging middleware, and in the
 1050 # order shown in this example:
 1051 # <other middleware> keymaster encryption proxy-logging proxy-server
 1052 [filter:keymaster]
 1053 use = egg:swift#keymaster
 1054 
 1055 # Sets the root secret from which encryption keys are derived. This must be set
 1056 # before first use to a value that is a base64 encoding of at least 32 bytes.
 1057 # The security of all encrypted data critically depends on this key, therefore
 1058 # it should be set to a high-entropy value. For example, a suitable value may
 1059 # be obtained by base-64 encoding a 32 byte (or longer) value generated by a
 1060 # cryptographically secure random number generator. Changing the root secret is
 1061 # likely to result in data loss.
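      #
      # For example, a sketch of one way to generate a suitable value (either
      # command produces a base64 encoding of 32 random bytes):
      #   openssl rand -base64 32
      #   python3 -c "import base64, os; print(base64.b64encode(os.urandom(32)).decode())"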
 1062 encryption_root_secret = changeme
 1063 
 1064 # Multiple root secrets may be configured using options named
 1065 # 'encryption_root_secret_<secret_id>' where 'secret_id' is a unique
 1066 # identifier. This enables the root secret to be changed from time to time.
 1067 # Only one root secret is used for object PUTs or POSTs at any moment in time.
 1068 # This is specified by the 'active_root_secret_id' option. If
 1069 # 'active_root_secret_id' is not specified then the root secret specified by
 1070 # 'encryption_root_secret' is considered to be the default. Once a root secret
 1071 # has been used as the default root secret it must remain in the config file in
 1072 # order that any objects that were encrypted with it may be subsequently
 1073 # decrypted. The secret_id used to identify the key cannot change.
 1074 # encryption_root_secret_myid = changeme
 1075 # active_root_secret_id = myid
 1076 
 1077 # Sets the path from which the keymaster config options should be read. This
 1078 # allows multiple processes which need to be encryption-aware (for example,
 1079 # proxy-server and container-sync) to share the same config file, ensuring
 1080 # that the encryption keys used are the same. The format expected is similar
 1081 # to other config files, with a single [keymaster] section and a single
 1082 # encryption_root_secret option. If this option is set, the root secret
 1083 # MUST NOT be set in proxy-server.conf.
 1084 # keymaster_config_path =
 1085 
 1086 # To store the encryption root secret in a remote key management system (KMS)
 1087 # such as Barbican, replace the keymaster middleware with the kms_keymaster
 1088 # middleware in the proxy-server pipeline. They should be to the right of all
 1089 # other middleware apart from the final proxy-logging middleware, and in the
 1090 # order shown in this example:
 1091 # <other middleware> kms_keymaster encryption proxy-logging proxy-server
 1092 [filter:kms_keymaster]
 1093 use = egg:swift#kms_keymaster
 1094 
 1095 # Sets the path from which the keymaster config options should be read. This
 1096 # allows multiple processes which need to be encryption-aware (for example,
 1097 # proxy-server and container-sync) to share the same config file, ensuring
 1098 # that the encryption keys used are the same. The format expected is similar
 1099 # to other config files, with a single [kms_keymaster] section. See the
 1100 # keymaster.conf-sample file for details on the kms_keymaster configuration
 1101 # options.
 1102 # keymaster_config_path =
 1103 
 1104 # kmip_keymaster middleware may be used to fetch an encryption root secret from
 1105 # a KMIP service. It should replace, in the same position, any other keymaster
 1106 # middleware in the proxy-server pipeline, so that the middleware order is as
 1107 # shown in this example:
 1108 # <other middleware> kmip_keymaster encryption proxy-logging proxy-server
 1109 [filter:kmip_keymaster]
 1110 use = egg:swift#kmip_keymaster
 1111 
 1112 # Sets the path from which the keymaster config options should be read. This
 1113 # allows multiple processes which need to be encryption-aware (for example,
 1114 # proxy-server and container-sync) to share the same config file, ensuring
 1115 # that the encryption keys used are the same. As an added benefit the
 1116 # keymaster configuration file can have different permissions than the
 1117 # `proxy-server.conf` file. The format expected is similar
 1118 # to other config files, with a single [kmip_keymaster] section. See the
 1119 # keymaster.conf-sample file for details on the kmip_keymaster configuration
 1120 # options.
 1121 # keymaster_config_path =
 1122 
 1123 [filter:encryption]
 1124 use = egg:swift#encryption
 1125 
 1126 # By default all PUT or POST'ed object data and/or metadata will be encrypted.
 1127 # Encryption of new data and/or metadata may be disabled by setting
 1128 # disable_encryption to True. However, all encryption middleware should remain
 1129 # in the pipeline in order for existing encrypted data to be read.
 1130 # disable_encryption = False
 1131 
 1132 # listing_formats should be just right of the first proxy-logging middleware,
 1133 # and left of most other middlewares. If it is not already present, it will
 1134 # be automatically inserted for you.
 1135 [filter:listing_formats]
 1136 use = egg:swift#listing_formats
 1137 
 1138 # Note: Put after slo, dlo, versioned_writes, but before encryption in the
 1139 # pipeline.
 1140 [filter:symlink]
 1141 use = egg:swift#symlink
 1142 # Symlinks can point to other symlinks provided the number of symlinks in a
 1143 # chain does not exceed the symloop_max value. If the number of chained
 1144 # symlinks exceeds the symloop_max limit, a 409 (HTTPConflict) error
 1145 # response will be produced.
 1146 # symloop_max = 2