"Fossies" - the Fresh Open Source Software Archive

Member "mesa-20.1.8/src/amd/vulkan/radv_pipeline_cache.c" (16 Sep 2020, 19820 Bytes) of package /linux/misc/mesa-20.1.8.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "radv_pipeline_cache.c" see the Fossies "Dox" file reference documentation and the last Fossies "Diffs" side-by-side code changes report: 20.1.5_vs_20.2.0-rc1.

/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"

#include "ac_nir_to_llvm.h"

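/* A single pipeline cache entry. The SHA-1 of the pipeline state (also
 * addressable as dwords for hashing) keys the entry; the serialized
 * per-stage binaries are packed back-to-back in the trailing 'code'
 * array, and 'variants' holds the deserialized shaders once created.
 */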
struct cache_entry {
    union {
        unsigned char sha1[20];
        uint32_t sha1_dw[5];
    };
    uint32_t binary_sizes[MESA_SHADER_STAGES];
    struct radv_shader_variant *variants[MESA_SHADER_STAGES];
    char code[];
};

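/* The in-memory cache is an open-addressed hash table with linear
 * probing. The table size is kept a power of two so probing can use a
 * simple mask, and the table is grown once it becomes half full.
 */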
void
radv_pipeline_cache_init(struct radv_pipeline_cache *cache,
                         struct radv_device *device)
{
    cache->device = device;
    pthread_mutex_init(&cache->mutex, NULL);

    cache->modified = false;
    cache->kernel_count = 0;
    cache->total_size = 0;
    cache->table_size = 1024;
    const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]);
    cache->hash_table = malloc(byte_size);

    /* We don't consider allocation failure fatal, we just start with a 0-sized
     * cache. Disable caching when we want to keep shader debug info, since
     * we don't get the debug info on cached shaders. */
    if (cache->hash_table == NULL ||
        (device->instance->debug_flags & RADV_DEBUG_NO_CACHE))
        cache->table_size = 0;
    else
        memset(cache->hash_table, 0, byte_size);
}

void
radv_pipeline_cache_finish(struct radv_pipeline_cache *cache)
{
    for (unsigned i = 0; i < cache->table_size; ++i)
        if (cache->hash_table[i]) {
            for (int j = 0; j < MESA_SHADER_STAGES; ++j) {
                if (cache->hash_table[i]->variants[j])
                    radv_shader_variant_destroy(cache->device,
                                                cache->hash_table[i]->variants[j]);
            }
            vk_free(&cache->alloc, cache->hash_table[i]);
        }
    pthread_mutex_destroy(&cache->mutex);
    free(cache->hash_table);
}

static uint32_t
entry_size(struct cache_entry *entry)
{
    size_t ret = sizeof(*entry);
    for (int i = 0; i < MESA_SHADER_STAGES; ++i)
        if (entry->binary_sizes[i])
            ret += entry->binary_sizes[i];
    return ret;
}

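/* Computes the key used for both the in-memory and on-disk caches. The
 * hash covers everything that can affect compilation: the pipeline key
 * and layout, each stage's SPIR-V module hash, entry point name and
 * specialization constants, plus the pipeline create flags.
 */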
void
radv_hash_shaders(unsigned char *hash,
                  const VkPipelineShaderStageCreateInfo **stages,
                  const struct radv_pipeline_layout *layout,
                  const struct radv_pipeline_key *key,
                  uint32_t flags)
{
    struct mesa_sha1 ctx;

    _mesa_sha1_init(&ctx);
    if (key)
        _mesa_sha1_update(&ctx, key, sizeof(*key));
    if (layout)
        _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));

    for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
        if (stages[i]) {
            RADV_FROM_HANDLE(radv_shader_module, module, stages[i]->module);
            const VkSpecializationInfo *spec_info = stages[i]->pSpecializationInfo;

            _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
            _mesa_sha1_update(&ctx, stages[i]->pName, strlen(stages[i]->pName));
            if (spec_info) {
                _mesa_sha1_update(&ctx, spec_info->pMapEntries,
                                  spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
                _mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize);
            }
        }
    }
    _mesa_sha1_update(&ctx, &flags, sizeof(flags));
    _mesa_sha1_final(&ctx, hash);
}

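/* Looks up an entry by linear probing from the slot selected by the
 * first dword of the SHA-1. Probing stops at the first empty slot: the
 * table is never more than half full, so a full sweep is impossible.
 */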
static struct cache_entry *
radv_pipeline_cache_search_unlocked(struct radv_pipeline_cache *cache,
                                    const unsigned char *sha1)
{
    if (cache->table_size == 0)
        return NULL;

    const uint32_t mask = cache->table_size - 1;
    const uint32_t start = (*(uint32_t *) sha1);

    for (uint32_t i = 0; i < cache->table_size; i++) {
        const uint32_t index = (start + i) & mask;
        struct cache_entry *entry = cache->hash_table[index];

        if (!entry)
            return NULL;

        if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
            return entry;
        }
    }

    unreachable("hash table should never be full");
}

static struct cache_entry *
radv_pipeline_cache_search(struct radv_pipeline_cache *cache,
                           const unsigned char *sha1)
{
    struct cache_entry *entry;

    pthread_mutex_lock(&cache->mutex);

    entry = radv_pipeline_cache_search_unlocked(cache, sha1);

    pthread_mutex_unlock(&cache->mutex);

    return entry;
}

static void
radv_pipeline_cache_set_entry(struct radv_pipeline_cache *cache,
                              struct cache_entry *entry)
{
    const uint32_t mask = cache->table_size - 1;
    const uint32_t start = entry->sha1_dw[0];

    /* We'll always be able to insert when we get here. */
    assert(cache->kernel_count < cache->table_size / 2);

    for (uint32_t i = 0; i < cache->table_size; i++) {
        const uint32_t index = (start + i) & mask;
        if (!cache->hash_table[index]) {
            cache->hash_table[index] = entry;
            break;
        }
    }

    cache->total_size += entry_size(entry);
    cache->kernel_count++;
}

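/* Doubles the hash table and re-inserts every live entry; the size and
 * count fields are reset first because radv_pipeline_cache_set_entry
 * re-accumulates them during the rehash.
 */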
static VkResult
radv_pipeline_cache_grow(struct radv_pipeline_cache *cache)
{
    const uint32_t table_size = cache->table_size * 2;
    const uint32_t old_table_size = cache->table_size;
    const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
    struct cache_entry **table;
    struct cache_entry **old_table = cache->hash_table;

    table = malloc(byte_size);
    if (table == NULL)
        return vk_error(cache->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    cache->hash_table = table;
    cache->table_size = table_size;
    cache->kernel_count = 0;
    cache->total_size = 0;

    memset(cache->hash_table, 0, byte_size);
    for (uint32_t i = 0; i < old_table_size; i++) {
        struct cache_entry *entry = old_table[i];
        if (!entry)
            continue;

        radv_pipeline_cache_set_entry(cache, entry);
    }

    free(old_table);

    return VK_SUCCESS;
}

static void
radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache,
                              struct cache_entry *entry)
{
    if (cache->kernel_count == cache->table_size / 2)
        radv_pipeline_cache_grow(cache);

    /* Failing to grow the hash table isn't fatal, but it may mean we
     * don't have enough space for this new kernel. Only add it if
     * there's room.
     */
    if (cache->kernel_count < cache->table_size / 2)
        radv_pipeline_cache_set_entry(cache, entry);
}

static bool
radv_is_cache_disabled(struct radv_device *device)
{
    /* Pipeline caches can be disabled with RADV_DEBUG=nocache, with
     * MESA_GLSL_CACHE_DISABLE=1, and when VK_AMD_shader_info is requested.
     */
    return (device->instance->debug_flags & RADV_DEBUG_NO_CACHE);
}

/*
 * Secure compiles cannot open files, so we get the parent process to load
 * the cache entry for us.
 */
static struct cache_entry *
radv_sc_read_from_disk_cache(struct radv_device *device, uint8_t *disk_sha1)
{
    struct cache_entry *entry;
    unsigned process = device->sc_state->secure_compile_thread_counter;
    enum radv_secure_compile_type sc_type = RADV_SC_TYPE_READ_DISK_CACHE;

    write(device->sc_state->secure_compile_processes[process].fd_secure_output,
          &sc_type, sizeof(enum radv_secure_compile_type));
    write(device->sc_state->secure_compile_processes[process].fd_secure_output,
          disk_sha1, sizeof(uint8_t) * 20);

    uint8_t found_cache_entry;
    if (!radv_sc_read(device->sc_state->secure_compile_processes[process].fd_secure_input,
                      &found_cache_entry, sizeof(uint8_t), true))
        return NULL;

    if (found_cache_entry) {
        size_t entry_size;
        if (!radv_sc_read(device->sc_state->secure_compile_processes[process].fd_secure_input,
                          &entry_size, sizeof(size_t), true))
            return NULL;

        entry = malloc(entry_size);
        if (!entry)
            return NULL;

        if (!radv_sc_read(device->sc_state->secure_compile_processes[process].fd_secure_input,
                          entry, entry_size, true)) {
            free(entry);
            return NULL;
        }

        return entry;
    }

    return NULL;
}

/*
 * Secure compiles cannot open files, so we get the parent process to write
 * to the disk cache for us.
 */
static void
radv_sc_write_to_disk_cache(struct radv_device *device, uint8_t *disk_sha1,
                            struct cache_entry *entry)
{
    unsigned process = device->sc_state->secure_compile_thread_counter;
    enum radv_secure_compile_type sc_type = RADV_SC_TYPE_WRITE_DISK_CACHE;

    write(device->sc_state->secure_compile_processes[process].fd_secure_output,
          &sc_type, sizeof(enum radv_secure_compile_type));
    write(device->sc_state->secure_compile_processes[process].fd_secure_output,
          disk_sha1, sizeof(uint8_t) * 20);

    uint32_t size = entry_size(entry);
    write(device->sc_state->secure_compile_processes[process].fd_secure_output,
          &size, sizeof(uint32_t));
    write(device->sc_state->secure_compile_processes[process].fd_secure_output,
          entry, size);
}

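/* Tries to satisfy a pipeline compile from the caches: first the given
 * (or default in-memory) cache, then the on-disk cache, promoting disk
 * hits into the in-memory table. Returns false on a complete miss, in
 * which case the caller must compile the shaders itself.
 */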
bool
radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
                                                struct radv_pipeline_cache *cache,
                                                const unsigned char *sha1,
                                                struct radv_shader_variant **variants,
                                                bool *found_in_application_cache)
{
    struct cache_entry *entry;

    if (!cache) {
        cache = device->mem_cache;
        *found_in_application_cache = false;
    }

    pthread_mutex_lock(&cache->mutex);

    entry = radv_pipeline_cache_search_unlocked(cache, sha1);

    if (!entry) {
        *found_in_application_cache = false;

        /* Don't cache when we want debug info, since this isn't
         * present in the cache.
         */
        if (radv_is_cache_disabled(device) || !device->physical_device->disk_cache) {
            pthread_mutex_unlock(&cache->mutex);
            return false;
        }

        uint8_t disk_sha1[20];
        disk_cache_compute_key(device->physical_device->disk_cache,
                               sha1, 20, disk_sha1);

        if (radv_device_use_secure_compile(device->instance)) {
            entry = radv_sc_read_from_disk_cache(device, disk_sha1);
        } else {
            entry = (struct cache_entry *)
                disk_cache_get(device->physical_device->disk_cache,
                               disk_sha1, NULL);
        }

        if (!entry) {
            pthread_mutex_unlock(&cache->mutex);
            return false;
        } else {
            size_t size = entry_size(entry);
            struct cache_entry *new_entry = vk_alloc(&cache->alloc, size, 8,
                                                     VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
            if (!new_entry) {
                free(entry);
                pthread_mutex_unlock(&cache->mutex);
                return false;
            }

            memcpy(new_entry, entry, entry_size(entry));
            free(entry);
            entry = new_entry;

            if (!(device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE) ||
                cache != device->mem_cache)
                radv_pipeline_cache_add_entry(cache, new_entry);
        }
    }

    char *p = entry->code;
    for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
        if (!entry->variants[i] && entry->binary_sizes[i]) {
            struct radv_shader_binary *binary = calloc(1, entry->binary_sizes[i]);
            if (!binary) {
                /* Treat allocation failure as a cache miss. */
                pthread_mutex_unlock(&cache->mutex);
                return false;
            }
            memcpy(binary, p, entry->binary_sizes[i]);
            p += entry->binary_sizes[i];

            entry->variants[i] = radv_shader_variant_create(device, binary, false);
            free(binary);
        } else if (entry->binary_sizes[i]) {
            p += entry->binary_sizes[i];
        }
    }

    memcpy(variants, entry->variants, sizeof(entry->variants));

    if (device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE &&
        cache == device->mem_cache)
        vk_free(&cache->alloc, entry);
    else {
        for (int i = 0; i < MESA_SHADER_STAGES; ++i)
            if (entry->variants[i])
                p_atomic_inc(&entry->variants[i]->ref_count);
    }

    pthread_mutex_unlock(&cache->mutex);
    return true;
}

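/* Inserts freshly compiled shaders under the given SHA-1. If another
 * thread already inserted an entry for this key, the existing variants
 * win and the caller's are destroyed, so all callers end up sharing one
 * set of variants.
 */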
void
radv_pipeline_cache_insert_shaders(struct radv_device *device,
                                   struct radv_pipeline_cache *cache,
                                   const unsigned char *sha1,
                                   struct radv_shader_variant **variants,
                                   struct radv_shader_binary *const *binaries)
{
    if (!cache)
        cache = device->mem_cache;

    pthread_mutex_lock(&cache->mutex);
    struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
    if (entry) {
        for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
            if (entry->variants[i]) {
                radv_shader_variant_destroy(cache->device, variants[i]);
                variants[i] = entry->variants[i];
            } else {
                entry->variants[i] = variants[i];
            }
            if (variants[i])
                p_atomic_inc(&variants[i]->ref_count);
        }
        pthread_mutex_unlock(&cache->mutex);
        return;
    }

    /* Don't cache when we want debug info, since this isn't
     * present in the cache.
     */
    if (radv_is_cache_disabled(device)) {
        pthread_mutex_unlock(&cache->mutex);
        return;
    }

    size_t size = sizeof(*entry);
    for (int i = 0; i < MESA_SHADER_STAGES; ++i)
        if (variants[i])
            size += binaries[i]->total_size;

    entry = vk_alloc(&cache->alloc, size, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
    if (!entry) {
        pthread_mutex_unlock(&cache->mutex);
        return;
    }

    memset(entry, 0, sizeof(*entry));
    memcpy(entry->sha1, sha1, 20);

    char *p = entry->code;

    for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
        if (!variants[i])
            continue;

        entry->binary_sizes[i] = binaries[i]->total_size;

        memcpy(p, binaries[i], binaries[i]->total_size);
        p += binaries[i]->total_size;
    }

    /* Always add cache items to disk. This will allow collection of
     * compiled shaders by third parties such as Steam, even if the app
     * implements its own pipeline cache.
     */
    if (device->physical_device->disk_cache) {
        uint8_t disk_sha1[20];
        disk_cache_compute_key(device->physical_device->disk_cache, sha1, 20,
                               disk_sha1);

        /* Write the cache item out to the parent of this forked
         * process.
         */
        if (radv_device_use_secure_compile(device->instance)) {
            radv_sc_write_to_disk_cache(device, disk_sha1, entry);
        } else {
            disk_cache_put(device->physical_device->disk_cache,
                           disk_sha1, entry, entry_size(entry),
                           NULL);
        }
    }

    if (device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE &&
        cache == device->mem_cache) {
        vk_free2(&cache->alloc, NULL, entry);
        pthread_mutex_unlock(&cache->mutex);
        return;
    }

    /* We delay setting the variants so we have reproducible disk cache
     * items.
     */
    for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
        if (!variants[i])
            continue;

        entry->variants[i] = variants[i];
        p_atomic_inc(&variants[i]->ref_count);
    }

    radv_pipeline_cache_add_entry(cache, entry);

    cache->modified = true;
    pthread_mutex_unlock(&cache->mutex);
}

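/* Serialized pipeline cache header, matching the layout the Vulkan spec
 * mandates for vkGetPipelineCacheData (VkPipelineCacheHeaderVersionOne).
 */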
struct cache_header {
    uint32_t header_size;
    uint32_t header_version;
    uint32_t vendor_id;
    uint32_t device_id;
    uint8_t  uuid[VK_UUID_SIZE];
};

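/* Loads serialized entries into the cache, validating the header
 * (version, vendor/device id and cache UUID) so that stale or foreign
 * blobs supplied by applications are rejected rather than trusted.
 */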
bool
radv_pipeline_cache_load(struct radv_pipeline_cache *cache,
                         const void *data, size_t size)
{
    struct radv_device *device = cache->device;
    struct cache_header header;

    if (size < sizeof(header))
        return false;
    memcpy(&header, data, sizeof(header));
    if (header.header_size < sizeof(header))
        return false;
    if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
        return false;
    if (header.vendor_id != ATI_VENDOR_ID)
        return false;
    if (header.device_id != device->physical_device->rad_info.pci_id)
        return false;
    if (memcmp(header.uuid, device->physical_device->cache_uuid, VK_UUID_SIZE) != 0)
        return false;

    char *end = (void *) data + size;
    char *p = (void *) data + header.header_size;

    while (end - p >= sizeof(struct cache_entry)) {
        struct cache_entry *entry = (struct cache_entry *) p;
        struct cache_entry *dest_entry;
        size_t entry_sz = entry_size(entry);
        if (end - p < entry_sz)
            break;

        dest_entry = vk_alloc(&cache->alloc, entry_sz,
                              8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
        if (dest_entry) {
            memcpy(dest_entry, entry, entry_sz);
            for (int i = 0; i < MESA_SHADER_STAGES; ++i)
                dest_entry->variants[i] = NULL;
            radv_pipeline_cache_add_entry(cache, dest_entry);
        }
        p += entry_sz;
    }

    return true;
}

VkResult radv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    struct radv_pipeline_cache *cache;

    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
    assert(pCreateInfo->flags == 0);

    cache = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*cache), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (cache == NULL)
        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    if (pAllocator)
        cache->alloc = *pAllocator;
    else
        cache->alloc = device->alloc;

    radv_pipeline_cache_init(cache, device);

    if (pCreateInfo->initialDataSize > 0) {
        radv_pipeline_cache_load(cache,
                                 pCreateInfo->pInitialData,
                                 pCreateInfo->initialDataSize);
    }

    *pPipelineCache = radv_pipeline_cache_to_handle(cache);

    return VK_SUCCESS;
}

void radv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);

    if (!cache)
        return;
    radv_pipeline_cache_finish(cache);

    vk_free2(&device->alloc, pAllocator, cache);
}

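/* Implements the standard Vulkan two-call idiom: called with
 * pData == NULL it only reports the required size, e.g. on the
 * application side:
 *
 *    size_t size;
 *    vkGetPipelineCacheData(device, cache, &size, NULL);
 *    void *data = malloc(size);
 *    vkGetPipelineCacheData(device, cache, &size, data);
 */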
VkResult radv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
    struct cache_header *header;
    VkResult result = VK_SUCCESS;

    pthread_mutex_lock(&cache->mutex);

    const size_t size = sizeof(*header) + cache->total_size;
    if (pData == NULL) {
        pthread_mutex_unlock(&cache->mutex);
        *pDataSize = size;
        return VK_SUCCESS;
    }
    if (*pDataSize < sizeof(*header)) {
        pthread_mutex_unlock(&cache->mutex);
        *pDataSize = 0;
        return VK_INCOMPLETE;
    }
    void *p = pData, *end = pData + *pDataSize;
    header = p;
    header->header_size = sizeof(*header);
    header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
    header->vendor_id = ATI_VENDOR_ID;
    header->device_id = device->physical_device->rad_info.pci_id;
    memcpy(header->uuid, device->physical_device->cache_uuid, VK_UUID_SIZE);
    p += header->header_size;

    struct cache_entry *entry;
    for (uint32_t i = 0; i < cache->table_size; i++) {
        if (!cache->hash_table[i])
            continue;
        entry = cache->hash_table[i];
        const uint32_t entry_sz = entry_size(entry);
        if (end < p + entry_sz) {
            result = VK_INCOMPLETE;
            break;
        }

        memcpy(p, entry, entry_sz);
        for (int j = 0; j < MESA_SHADER_STAGES; ++j)
            ((struct cache_entry *) p)->variants[j] = NULL;
        p += entry_sz;
    }
    *pDataSize = p - pData;

    pthread_mutex_unlock(&cache->mutex);
    return result;
}

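/* Moves entries from src into dst: merged entries are stolen (their src
 * slots are cleared) rather than copied, so src must not be used to
 * look them up afterwards.
 */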
static void
radv_pipeline_cache_merge(struct radv_pipeline_cache *dst,
                          struct radv_pipeline_cache *src)
{
    for (uint32_t i = 0; i < src->table_size; i++) {
        struct cache_entry *entry = src->hash_table[i];
        if (!entry || radv_pipeline_cache_search(dst, entry->sha1))
            continue;

        radv_pipeline_cache_add_entry(dst, entry);

        src->hash_table[i] = NULL;
    }
}

VkResult radv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
    RADV_FROM_HANDLE(radv_pipeline_cache, dst, destCache);

    for (uint32_t i = 0; i < srcCacheCount; i++) {
        RADV_FROM_HANDLE(radv_pipeline_cache, src, pSrcCaches[i]);

        radv_pipeline_cache_merge(dst, src);
    }

    return VK_SUCCESS;
}