"Fossies" - the Fresh Open Source Software Archive

Member "xen-4.15.1/xen/arch/arm/gic-v2.c" (10 Sep 2021, 41107 Bytes) of package /linux/misc/xen-4.15.1.tar.gz:



    1 /*
    2  * xen/arch/arm/gic-v2.c
    3  *
    4  * ARM Generic Interrupt Controller support v2
    5  *
    6  * Tim Deegan <tim@xen.org>
    7  * Copyright (c) 2011 Citrix Systems.
    8  *
    9  * This program is free software; you can redistribute it and/or modify
   10  * it under the terms of the GNU General Public License as published by
   11  * the Free Software Foundation; either version 2 of the License, or
   12  * (at your option) any later version.
   13  *
   14  * This program is distributed in the hope that it will be useful,
   15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   17  * GNU General Public License for more details.
   18  */
   19 
   20 #include <xen/lib.h>
   21 #include <xen/init.h>
   22 #include <xen/mm.h>
   23 #include <xen/vmap.h>
   24 #include <xen/irq.h>
   25 #include <xen/iocap.h>
   26 #include <xen/sched.h>
   27 #include <xen/errno.h>
   28 #include <xen/softirq.h>
   29 #include <xen/list.h>
   30 #include <xen/device_tree.h>
   31 #include <xen/libfdt/libfdt.h>
   32 #include <xen/sizes.h>
   33 #include <xen/acpi.h>
   34 #include <acpi/actables.h>
   35 #include <asm/p2m.h>
   36 #include <asm/domain.h>
   37 #include <asm/platform.h>
   38 #include <asm/device.h>
   39 
   40 #include <asm/io.h>
   41 #include <asm/gic.h>
   42 #include <asm/acpi.h>
   43 
   44 /*
   45  * LR register definitions are GIC v2 specific.
    46  * These definitions were moved here from the header file.
   47  */
   48 #define GICH_V2_LR_VIRTUAL_MASK    0x3ff
   49 #define GICH_V2_LR_VIRTUAL_SHIFT   0
   50 #define GICH_V2_LR_PHYSICAL_MASK   0x3ff
   51 #define GICH_V2_LR_PHYSICAL_SHIFT  10
   52 #define GICH_V2_LR_STATE_MASK      0x3
   53 #define GICH_V2_LR_STATE_SHIFT     28
   54 #define GICH_V2_LR_PENDING         (1U << 28)
   55 #define GICH_V2_LR_ACTIVE          (1U << 29)
   56 #define GICH_V2_LR_PRIORITY_SHIFT  23
   57 #define GICH_V2_LR_PRIORITY_MASK   0x1f
   58 #define GICH_V2_LR_HW_SHIFT        31
   59 #define GICH_V2_LR_HW_MASK         0x1
   60 #define GICH_V2_LR_GRP_SHIFT       30
   61 #define GICH_V2_LR_GRP_MASK        0x1
   62 #define GICH_V2_LR_MAINTENANCE_IRQ (1U << 19)
   63 #define GICH_V2_LR_GRP1            (1U << 30)
   64 #define GICH_V2_LR_HW              (1U << GICH_V2_LR_HW_SHIFT)
   65 #define GICH_V2_LR_CPUID_SHIFT     10
   66 #define GICH_V2_LR_CPUID_MASK      0x7
   67 #define GICH_V2_VTR_NRLRGS         0x3f
   68 
   69 #define GICH_V2_VMCR_PRIORITY_MASK   0x1f
   70 #define GICH_V2_VMCR_PRIORITY_SHIFT  27
   71 
   72 /* GICv2m extension register definitions. */
   73 /*
    74  * MSI_TYPER:
    75  *     [31:26] Reserved
    76  *     [25:16] lowest SPI assigned to MSI
    77  *     [15:10] Reserved
    78  *     [9:0]   Number of SPIs assigned to MSI
    79  */
   80 #define V2M_MSI_TYPER               0x008
   81 #define V2M_MSI_TYPER_BASE_SHIFT    16
   82 #define V2M_MSI_TYPER_BASE_MASK     0x3FF
   83 #define V2M_MSI_TYPER_NUM_MASK      0x3FF
   84 #define V2M_MSI_SETSPI_NS           0x040
   85 #define V2M_MIN_SPI                 32
   86 #define V2M_MAX_SPI                 1019
   87 #define V2M_MSI_IIDR                0xFCC
   88 
   89 #define V2M_MSI_TYPER_BASE_SPI(x)   \
   90                 (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
   91 
   92 #define V2M_MSI_TYPER_NUM_SPI(x)    ((x) & V2M_MSI_TYPER_NUM_MASK)
   93 
   94 struct v2m_data {
   95     struct list_head entry;
   96     /* Pointer to the DT node representing the v2m frame */
   97     const struct dt_device_node *dt_node;
   98     paddr_t addr; /* Register frame base */
   99     paddr_t size; /* Register frame size */
  100     u32 spi_start; /* The first SPI number assigned to MSIs */
  101     u32 nr_spis; /* The number of SPIs for MSIs */
  102 };
  103 
  104 /* v2m extension register frame information list */
  105 static LIST_HEAD(gicv2m_info);
  106 
  107 /* Global state */
  108 static struct {
  109     void __iomem * map_dbase; /* IO mapped address of distributor registers */
  110     void __iomem * map_cbase; /* IO mapped address of CPU interface registers */
  111     void __iomem * map_hbase; /* IO mapped address of virtual interface registers */
  112     spinlock_t lock;
  113 } gicv2;
  114 
  115 static struct gic_info gicv2_info;
  116 
  117 /* The GIC mapping of CPU interfaces does not necessarily match the
  118  * logical CPU numbering. Let's use the mapping as returned by the GIC
  119  * itself.
  120  */
  121 static DEFINE_PER_CPU(u8, gic_cpu_id);
  122 
  123 /* Maximum number of CPU interfaces per GIC */
  124 #define NR_GIC_CPU_IF 8
  125 
  126 static inline void writeb_gicd(uint8_t val, unsigned int offset)
  127 {
  128     writeb_relaxed(val, gicv2.map_dbase + offset);
  129 }
  130 
  131 static inline void writel_gicd(uint32_t val, unsigned int offset)
  132 {
  133     writel_relaxed(val, gicv2.map_dbase + offset);
  134 }
  135 
  136 static inline uint32_t readl_gicd(unsigned int offset)
  137 {
  138     return readl_relaxed(gicv2.map_dbase + offset);
  139 }
  140 
  141 static inline void writel_gicc(uint32_t val, unsigned int offset)
  142 {
  143     writel_relaxed(val, gicv2.map_cbase + offset);
  144 }
  145 
  146 static inline uint32_t readl_gicc(unsigned int offset)
  147 {
  148     return readl_relaxed(gicv2.map_cbase + offset);
  149 }
  150 
  151 static inline void writel_gich(uint32_t val, unsigned int offset)
  152 {
  153     writel_relaxed(val, gicv2.map_hbase + offset);
  154 }
  155 
  156 static inline uint32_t readl_gich(unsigned int offset)
  157 {
  158     return readl_relaxed(gicv2.map_hbase + offset);
  159 }
  160 
  161 static unsigned int gicv2_cpu_mask(const cpumask_t *cpumask)
  162 {
  163     unsigned int cpu;
  164     unsigned int mask = 0;
  165     cpumask_t possible_mask;
  166 
  167     cpumask_and(&possible_mask, cpumask, &cpu_possible_map);
  168     for_each_cpu( cpu, &possible_mask )
  169     {
  170         ASSERT(cpu < NR_GIC_CPU_IF);
  171         mask |= per_cpu(gic_cpu_id, cpu);
  172     }
  173 
  174     return mask;
  175 }
  176 
  177 static void gicv2_save_state(struct vcpu *v)
  178 {
  179     int i;
  180 
  181     /* No need for spinlocks here because interrupts are disabled around
  182      * this call and it only accesses struct vcpu fields that cannot be
  183      * accessed simultaneously by another pCPU.
  184      */
  185     for ( i = 0; i < gicv2_info.nr_lrs; i++ )
  186         v->arch.gic.v2.lr[i] = readl_gich(GICH_LR + i * 4);
  187 
  188     v->arch.gic.v2.apr = readl_gich(GICH_APR);
  189     v->arch.gic.v2.vmcr = readl_gich(GICH_VMCR);
  190     /* Disable until next VCPU scheduled */
  191     writel_gich(0, GICH_HCR);
  192 }
  193 
  194 static void gicv2_restore_state(const struct vcpu *v)
  195 {
  196     int i;
  197 
  198     for ( i = 0; i < gicv2_info.nr_lrs; i++ )
  199         writel_gich(v->arch.gic.v2.lr[i], GICH_LR + i * 4);
  200 
  201     writel_gich(v->arch.gic.v2.apr, GICH_APR);
  202     writel_gich(v->arch.gic.v2.vmcr, GICH_VMCR);
  203     writel_gich(GICH_HCR_EN, GICH_HCR);
  204 }
  205 
  206 static void gicv2_dump_state(const struct vcpu *v)
  207 {
  208     int i;
  209 
  210     if ( v == current )
  211     {
  212         for ( i = 0; i < gicv2_info.nr_lrs; i++ )
  213             printk("   HW_LR[%d]=%x\n", i,
  214                    readl_gich(GICH_LR + i * 4));
  215     }
  216     else
  217     {
  218         for ( i = 0; i < gicv2_info.nr_lrs; i++ )
  219             printk("   VCPU_LR[%d]=%x\n", i, v->arch.gic.v2.lr[i]);
  220     }
  221 }
  222 
  223 static void gicv2_eoi_irq(struct irq_desc *irqd)
  224 {
  225     int irq = irqd->irq;
  226     /* Lower the priority */
  227     writel_gicc(irq, GICC_EOIR);
  228 }
  229 
  230 static void gicv2_dir_irq(struct irq_desc *irqd)
  231 {
  232     /* Deactivate */
  233     writel_gicc(irqd->irq, GICC_DIR);
  234 }
  235 
  236 static unsigned int gicv2_read_irq(void)
  237 {
  238     return (readl_gicc(GICC_IAR) & GICC_IA_IRQ);
  239 }
  240 
  241 static void gicv2_poke_irq(struct irq_desc *irqd, uint32_t offset)
  242 {
  243     writel_gicd(1U << (irqd->irq % 32), offset + (irqd->irq / 32) * 4);
  244 }
  245 
  246 static bool gicv2_peek_irq(struct irq_desc *irqd, uint32_t offset)
  247 {
  248     uint32_t reg;
  249 
  250     reg = readl_gicd(offset + (irqd->irq / 32) * 4) & (1U << (irqd->irq % 32));
  251 
  252     return reg;
  253 }
  254 
  255 /*
  256  * This is forcing the active state of an interrupt, somewhat circumventing
  257  * the normal interrupt flow and the GIC state machine. So use with care
  258  * and only if you know what you are doing. For this reason we also have to
  259  * tinker with the _IRQ_INPROGRESS bit here, since the normal IRQ handler
  260  * will not be involved.
  261  */
  262 static void gicv2_set_active_state(struct irq_desc *irqd, bool active)
  263 {
  264     ASSERT(spin_is_locked(&irqd->lock));
  265 
  266     if ( active )
  267     {
  268         set_bit(_IRQ_INPROGRESS, &irqd->status);
  269         gicv2_poke_irq(irqd, GICD_ISACTIVER);
  270     }
  271     else
  272     {
  273         clear_bit(_IRQ_INPROGRESS, &irqd->status);
  274         gicv2_poke_irq(irqd, GICD_ICACTIVER);
  275     }
  276 }
  277 
  278 static void gicv2_set_pending_state(struct irq_desc *irqd, bool pending)
  279 {
  280     ASSERT(spin_is_locked(&irqd->lock));
  281 
  282     if ( pending )
  283     {
  284         /* The _IRQ_INPROGRESS bit will be set when the interrupt fires. */
  285         gicv2_poke_irq(irqd, GICD_ISPENDR);
  286     }
  287     else
  288     {
  289         /* The _IRQ_INPROGRESS bit remains unchanged. */
  290         gicv2_poke_irq(irqd, GICD_ICPENDR);
  291     }
  292 }
  293 
  294 static void gicv2_set_irq_type(struct irq_desc *desc, unsigned int type)
  295 {
  296     uint32_t cfg, actual, edgebit;
  297     unsigned int irq = desc->irq;
  298 
  299     spin_lock(&gicv2.lock);
  300     /* Set edge / level */
  301     cfg = readl_gicd(GICD_ICFGR + (irq / 16) * 4);
  302     edgebit = 2u << (2 * (irq % 16));
  303     if ( type & IRQ_TYPE_LEVEL_MASK )
  304         cfg &= ~edgebit;
  305     else if ( type & IRQ_TYPE_EDGE_BOTH )
  306         cfg |= edgebit;
  307     writel_gicd(cfg, GICD_ICFGR + (irq / 16) * 4);
  308 
  309     actual = readl_gicd(GICD_ICFGR + (irq / 16) * 4);
  310     if ( ( cfg & edgebit ) ^ ( actual & edgebit ) )
  311     {
  312         printk(XENLOG_WARNING "GICv2: WARNING: "
  313                "CPU%d: Failed to configure IRQ%u as %s-triggered. "
  314                "H/w forces it to %s-triggered.\n",
  315                smp_processor_id(), desc->irq,
  316                cfg & edgebit ? "Edge" : "Level",
  317                actual & edgebit ? "Edge" : "Level");
  318         desc->arch.type = actual & edgebit ?
  319             IRQ_TYPE_EDGE_RISING :
  320             IRQ_TYPE_LEVEL_HIGH;
  321     }
  322 
  323     spin_unlock(&gicv2.lock);
  324 }
  325 
  326 static void gicv2_set_irq_priority(struct irq_desc *desc,
  327                                    unsigned int priority)
  328 {
  329     unsigned int irq = desc->irq;
  330 
  331     spin_lock(&gicv2.lock);
  332 
  333     /* Set priority */
  334     writeb_gicd(priority, GICD_IPRIORITYR + irq);
  335 
  336     spin_unlock(&gicv2.lock);
  337 }
  338 
  339 static void __init gicv2_dist_init(void)
  340 {
  341     uint32_t type;
  342     uint32_t cpumask;
  343     uint32_t gic_cpus;
  344     unsigned int nr_lines;
  345     int i;
  346 
  347     cpumask = readl_gicd(GICD_ITARGETSR) & 0xff;
  348     cpumask |= cpumask << 8;
  349     cpumask |= cpumask << 16;
  350 
  351     /* Disable the distributor */
  352     writel_gicd(0, GICD_CTLR);
  353 
  354     type = readl_gicd(GICD_TYPER);
  355     nr_lines = 32 * ((type & GICD_TYPE_LINES) + 1);
  356     /* Only 1020 interrupts are supported */
  357     nr_lines = min(1020U, nr_lines);
  358     gicv2_info.nr_lines = nr_lines;
  359 
  360     gic_cpus = 1 + ((type & GICD_TYPE_CPUS) >> 5);
  361     printk("GICv2: %d lines, %d cpu%s%s (IID %8.8x).\n",
  362            nr_lines, gic_cpus, (gic_cpus == 1) ? "" : "s",
  363            (type & GICD_TYPE_SEC) ? ", secure" : "",
  364            readl_gicd(GICD_IIDR));
  365 
  366     /* Default all global IRQs to level, active low */
  367     for ( i = 32; i < nr_lines; i += 16 )
  368         writel_gicd(0x0, GICD_ICFGR + (i / 16) * 4);
  369 
  370     /* Route all global IRQs to this CPU */
  371     for ( i = 32; i < nr_lines; i += 4 )
  372         writel_gicd(cpumask, GICD_ITARGETSR + (i / 4) * 4);
  373 
  374     /* Default priority for global interrupts */
  375     for ( i = 32; i < nr_lines; i += 4 )
  376         writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
  377                     GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
  378                     GICD_IPRIORITYR + (i / 4) * 4);
  379 
  380     /* Disable all global interrupts */
  381     for ( i = 32; i < nr_lines; i += 32 )
  382     {
  383         writel_gicd(~0x0, GICD_ICENABLER + (i / 32) * 4);
  384         writel_gicd(~0x0, GICD_ICACTIVER + (i / 32) * 4);
  385     }
  386 
  387     /* Turn on the distributor */
  388     writel_gicd(GICD_CTL_ENABLE, GICD_CTLR);
  389 }
  390 
  391 static void gicv2_cpu_init(void)
  392 {
  393     int i;
  394 
  395     this_cpu(gic_cpu_id) = readl_gicd(GICD_ITARGETSR) & 0xff;
  396 
  397     /* The first 32 interrupts (PPI and SGI) are banked per-cpu, so
  398      * even though they are controlled with GICD registers, they must
  399      * be set up here with the other per-cpu state. */
  400     writel_gicd(0xffffffff, GICD_ICACTIVER); /* Deactivate PPIs and SGIs */
  401     writel_gicd(0xffff0000, GICD_ICENABLER); /* Disable all PPI */
  402     writel_gicd(0x0000ffff, GICD_ISENABLER); /* Enable all SGI */
  403 
  404     /* Set SGI priorities */
  405     for ( i = 0; i < 16; i += 4 )
  406         writel_gicd(GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 |
  407                     GIC_PRI_IPI << 8 | GIC_PRI_IPI,
  408                     GICD_IPRIORITYR + (i / 4) * 4);
  409 
  410     /* Set PPI priorities */
  411     for ( i = 16; i < 32; i += 4 )
  412         writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
  413                     GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
  414                     GICD_IPRIORITYR + (i / 4) * 4);
  415 
  416     /* Local settings: interface controller */
  417     /* Don't mask by priority */
  418     writel_gicc(0xff, GICC_PMR);
  419     /* Finest granularity of priority */
  420     writel_gicc(0x0, GICC_BPR);
  421     /* Turn on delivery */
  422     writel_gicc(GICC_CTL_ENABLE|GICC_CTL_EOI, GICC_CTLR);
  423 }
  424 
  425 static void gicv2_cpu_disable(void)
  426 {
  427     writel_gicc(0x0, GICC_CTLR);
  428 }
  429 
  430 static void gicv2_hyp_init(void)
  431 {
  432     uint32_t vtr;
  433     uint8_t nr_lrs;
  434 
  435     vtr = readl_gich(GICH_VTR);
  436     nr_lrs  = (vtr & GICH_V2_VTR_NRLRGS) + 1;
  437     gicv2_info.nr_lrs = nr_lrs;
  438 }
  439 
  440 static void gicv2_hyp_disable(void)
  441 {
  442     writel_gich(0, GICH_HCR);
  443 }
  444 
  445 static int gicv2_secondary_cpu_init(void)
  446 {
  447     spin_lock(&gicv2.lock);
  448 
  449     gicv2_cpu_init();
  450     gicv2_hyp_init();
  451 
  452     spin_unlock(&gicv2.lock);
  453 
  454     return 0;
  455 }
  456 
  457 static void gicv2_send_SGI(enum gic_sgi sgi, enum gic_sgi_mode irqmode,
  458                            const cpumask_t *cpu_mask)
  459 {
  460     unsigned int mask = 0;
  461     cpumask_t online_mask;
  462 
  463     /*
  464      * Ensure that stores to Normal memory are visible to the other CPUs
  465      * before they observe us issuing the IPI.
  466      */
  467     dmb(ishst);
  468 
  469     switch ( irqmode )
  470     {
  471     case SGI_TARGET_OTHERS:
  472         writel_gicd(GICD_SGI_TARGET_OTHERS | sgi, GICD_SGIR);
  473         break;
  474     case SGI_TARGET_SELF:
  475         writel_gicd(GICD_SGI_TARGET_SELF | sgi, GICD_SGIR);
  476         break;
  477     case SGI_TARGET_LIST:
  478         cpumask_and(&online_mask, cpu_mask, &cpu_online_map);
  479         mask = gicv2_cpu_mask(&online_mask);
  480         writel_gicd(GICD_SGI_TARGET_LIST |
  481                     (mask << GICD_SGI_TARGET_SHIFT) | sgi,
  482                     GICD_SGIR);
  483         break;
  484     default:
  485         BUG();
  486     }
  487 }
  488 
  489 /* Shut down the per-CPU GIC interface */
  490 static void gicv2_disable_interface(void)
  491 {
  492     spin_lock(&gicv2.lock);
  493     gicv2_cpu_disable();
  494     gicv2_hyp_disable();
  495     spin_unlock(&gicv2.lock);
  496 }
  497 
  498 static void gicv2_update_lr(int lr, unsigned int virq, uint8_t priority,
  499                             unsigned int hw_irq, unsigned int state)
  500 {
  501     uint32_t lr_reg;
  502 
  503     BUG_ON(lr >= gicv2_info.nr_lrs);
  504     BUG_ON(lr < 0);
  505 
  506     lr_reg = (((state & GICH_V2_LR_STATE_MASK) << GICH_V2_LR_STATE_SHIFT)  |
  507               ((GIC_PRI_TO_GUEST(priority) & GICH_V2_LR_PRIORITY_MASK)
  508                                           << GICH_V2_LR_PRIORITY_SHIFT) |
  509               ((virq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT));
  510 
  511     if ( hw_irq != INVALID_IRQ )
  512         lr_reg |= GICH_V2_LR_HW | ((hw_irq & GICH_V2_LR_PHYSICAL_MASK )
  513                                    << GICH_V2_LR_PHYSICAL_SHIFT);
  514 
  515     writel_gich(lr_reg, GICH_LR + lr * 4);
  516 }
  517 
  518 static void gicv2_clear_lr(int lr)
  519 {
  520     writel_gich(0, GICH_LR + lr * 4);
  521 }
  522 
  523 static void gicv2_read_lr(int lr, struct gic_lr *lr_reg)
  524 {
  525     uint32_t lrv;
  526 
  527     lrv          = readl_gich(GICH_LR + lr * 4);
  528     lr_reg->virq = (lrv >> GICH_V2_LR_VIRTUAL_SHIFT) & GICH_V2_LR_VIRTUAL_MASK;
  529     lr_reg->priority = (lrv >> GICH_V2_LR_PRIORITY_SHIFT) & GICH_V2_LR_PRIORITY_MASK;
  530     lr_reg->pending = lrv & GICH_V2_LR_PENDING;
  531     lr_reg->active = lrv & GICH_V2_LR_ACTIVE;
  532     lr_reg->hw_status = lrv & GICH_V2_LR_HW;
  533 
  534     if ( lr_reg->hw_status )
  535     {
  536         lr_reg->hw.pirq = lrv >> GICH_V2_LR_PHYSICAL_SHIFT;
  537         lr_reg->hw.pirq &= GICH_V2_LR_PHYSICAL_MASK;
  538     }
  539     else
  540     {
  541         lr_reg->virt.eoi = (lrv & GICH_V2_LR_MAINTENANCE_IRQ);
  542         /*
  543          * This is only valid for SGIs, but it does no harm to always
  544          * read it, as it should be 0 by default.
  545          */
  546         lr_reg->virt.source = (lrv >> GICH_V2_LR_CPUID_SHIFT)
  547             & GICH_V2_LR_CPUID_MASK;
  548     }
  549 }
  550 
  551 static void gicv2_write_lr(int lr, const struct gic_lr *lr_reg)
  552 {
  553     uint32_t lrv = 0;
  554 
  555     lrv = (((lr_reg->virq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT)   |
  556           ((uint32_t)(lr_reg->priority & GICH_V2_LR_PRIORITY_MASK)
  557                                       << GICH_V2_LR_PRIORITY_SHIFT) );
  558 
  559     if ( lr_reg->active )
  560         lrv |= GICH_V2_LR_ACTIVE;
  561 
  562     if ( lr_reg->pending )
  563         lrv |= GICH_V2_LR_PENDING;
  564 
  565     if ( lr_reg->hw_status )
  566     {
  567         lrv |= GICH_V2_LR_HW;
  568         lrv |= lr_reg->hw.pirq << GICH_V2_LR_PHYSICAL_SHIFT;
  569     }
  570     else
  571     {
  572         if ( lr_reg->virt.eoi )
  573             lrv |= GICH_V2_LR_MAINTENANCE_IRQ;
  574         /*
  575          * Source is only valid for SGIs; the caller should make sure
  576          * that the field virt.source is always 0 for non-SGIs.
  577          */
  578         ASSERT(!lr_reg->virt.source || lr_reg->virq < NR_GIC_SGI);
  579         lrv |= (uint32_t)lr_reg->virt.source << GICH_V2_LR_CPUID_SHIFT;
  580     }
  581 
  582     writel_gich(lrv, GICH_LR + lr * 4);
  583 }
  584 
  585 static void gicv2_hcr_status(uint32_t flag, bool status)
  586 {
  587     uint32_t hcr = readl_gich(GICH_HCR);
  588 
  589     if ( status )
  590         hcr |= flag;
  591     else
  592         hcr &= (~flag);
  593 
  594     writel_gich(hcr, GICH_HCR);
  595 }
  596 
  597 static unsigned int gicv2_read_vmcr_priority(void)
  598 {
  599    return ((readl_gich(GICH_VMCR) >> GICH_V2_VMCR_PRIORITY_SHIFT)
  600            & GICH_V2_VMCR_PRIORITY_MASK);
  601 }
  602 
  603 static unsigned int gicv2_read_apr(int apr_reg)
  604 {
  605    return readl_gich(GICH_APR);
  606 }
  607 
  608 static bool gicv2_read_pending_state(struct irq_desc *irqd)
  609 {
  610     return gicv2_peek_irq(irqd, GICD_ISPENDR);
  611 }
  612 
  613 static void gicv2_irq_enable(struct irq_desc *desc)
  614 {
  615     unsigned long flags;
  616 
  617     ASSERT(spin_is_locked(&desc->lock));
  618 
  619     spin_lock_irqsave(&gicv2.lock, flags);
  620     clear_bit(_IRQ_DISABLED, &desc->status);
  621     dsb(sy);
  622     /* Enable routing */
  623     gicv2_poke_irq(desc, GICD_ISENABLER);
  624     spin_unlock_irqrestore(&gicv2.lock, flags);
  625 }
  626 
  627 static void gicv2_irq_disable(struct irq_desc *desc)
  628 {
  629     unsigned long flags;
  630 
  631     ASSERT(spin_is_locked(&desc->lock));
  632 
  633     spin_lock_irqsave(&gicv2.lock, flags);
  634     /* Disable routing */
  635     gicv2_poke_irq(desc, GICD_ICENABLER);
  636     set_bit(_IRQ_DISABLED, &desc->status);
  637     spin_unlock_irqrestore(&gicv2.lock, flags);
  638 }
  639 
  640 static unsigned int gicv2_irq_startup(struct irq_desc *desc)
  641 {
  642     gicv2_irq_enable(desc);
  643 
  644     return 0;
  645 }
  646 
  647 static void gicv2_irq_shutdown(struct irq_desc *desc)
  648 {
  649     gicv2_irq_disable(desc);
  650 }
  651 
  652 static void gicv2_irq_ack(struct irq_desc *desc)
  653 {
  654     /* No ACK -- reading IAR has done this for us */
  655 }
  656 
  657 static void gicv2_host_irq_end(struct irq_desc *desc)
  658 {
  659     /* Lower the priority */
  660     gicv2_eoi_irq(desc);
  661     /* Deactivate */
  662     gicv2_dir_irq(desc);
  663 }
  664 
  665 static void gicv2_guest_irq_end(struct irq_desc *desc)
  666 {
  667     /* Lower the priority of the IRQ */
  668     gicv2_eoi_irq(desc);
  669     /* Deactivation happens in maintenance interrupt / via GICV */
  670 }
  671 
  672 static void gicv2_irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
  673 {
  674     unsigned int mask;
  675 
  676     ASSERT(!cpumask_empty(cpu_mask));
  677 
  678     spin_lock(&gicv2.lock);
  679 
  680     mask = gicv2_cpu_mask(cpu_mask);
  681 
  682     /* Set target CPU mask (RAZ/WI on uniprocessor) */
  683     writeb_gicd(mask, GICD_ITARGETSR + desc->irq);
  684 
  685     spin_unlock(&gicv2.lock);
  686 }
  687 
  688 static int gicv2_map_hwdown_extra_mappings(struct domain *d)
  689 {
  690     const struct v2m_data *v2m_data;
  691 
  692     /* For the moment, we'll assign all v2m frames to the hardware domain. */
  693     list_for_each_entry( v2m_data, &gicv2m_info, entry )
  694     {
  695         int ret;
  696         u32 spi;
  697 
  698         printk("GICv2: Mapping v2m frame to d%d: addr=0x%"PRIpaddr" size=0x%"PRIpaddr" spi_base=%u num_spis=%u\n",
  699                d->domain_id, v2m_data->addr, v2m_data->size,
  700                v2m_data->spi_start, v2m_data->nr_spis);
  701 
  702         ret = map_mmio_regions(d, gaddr_to_gfn(v2m_data->addr),
  703                                PFN_UP(v2m_data->size),
  704                                maddr_to_mfn(v2m_data->addr));
  705         if ( ret )
  706         {
  707             printk(XENLOG_ERR "GICv2: Map v2m frame to d%d failed.\n",
  708                    d->domain_id);
  709             return ret;
  710         }
  711 
  712         /*
  713          * Map all SPIs that are allocated to MSIs for the frame to the
  714          * domain.
  715          */
  716         for ( spi = v2m_data->spi_start;
  717               spi < (v2m_data->spi_start + v2m_data->nr_spis); spi++ )
  718         {
  719             /*
  720              * MSIs are always edge-triggered. Configure the associated SPIs
  721              * to be edge-rising as default type.
  722              */
  723             ret = irq_set_spi_type(spi, IRQ_TYPE_EDGE_RISING);
  724             if ( ret )
  725             {
  726                 printk(XENLOG_ERR
  727                        "GICv2: Failed to set v2m MSI SPI[%d] type.\n", spi);
  728                 return ret;
  729             }
  730 
  731             /* Route a SPI that is allocated to MSI to the domain. */
  732             ret = route_irq_to_guest(d, spi, spi, "v2m");
  733             if ( ret )
  734             {
  735                 printk(XENLOG_ERR
  736                        "GICv2: Failed to route v2m MSI SPI[%d] to Dom%d.\n",
  737                        spi, d->domain_id);
  738                 return ret;
  739             }
  740 
  741             /* Reserve a SPI that is allocated to MSI for the domain. */
  742             if ( !vgic_reserve_virq(d, spi) )
  743             {
  744                 printk(XENLOG_ERR
  745                        "GICv2: Failed to reserve v2m MSI SPI[%d] for Dom%d.\n",
  746                        spi, d->domain_id);
  747                 return -EINVAL;
  748             }
  749         }
  750     }
  751 
  752     return 0;
  753 }
  754 
  755 /*
  756  * Set up gic v2m DT sub-node.
  757  * Please refer to the binding document:
  758  * https://www.kernel.org/doc/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
  759  */
  760 static int gicv2m_make_dt_node(const struct domain *d,
  761                                const struct dt_device_node *gic,
  762                                void *fdt)
  763 {
  764     u32 len;
  765     int res;
  766     const void *prop = NULL;
  767     const struct dt_device_node *v2m = NULL;
  768     const struct v2m_data *v2m_data;
  769 
  770     /* It is not necessary to create the node if there are no GICv2m frames */
  771     if ( list_empty(&gicv2m_info) )
  772         return 0;
  773 
  774     /* The sub-nodes require the ranges property */
  775     prop = dt_get_property(gic, "ranges", &len);
  776     if ( !prop )
  777     {
  778         printk(XENLOG_ERR "Can't find ranges property for the gic node\n");
  779         return -FDT_ERR_XEN(ENOENT);
  780     }
  781 
  782     res = fdt_property(fdt, "ranges", prop, len);
  783     if ( res )
  784         return res;
  785 
  786     list_for_each_entry( v2m_data, &gicv2m_info, entry )
  787     {
  788         v2m = v2m_data->dt_node;
  789 
  790         printk("GICv2: Creating v2m DT node for d%d: addr=0x%"PRIpaddr" size=0x%"PRIpaddr" spi_base=%u num_spis=%u\n",
  791                d->domain_id, v2m_data->addr, v2m_data->size,
  792                v2m_data->spi_start, v2m_data->nr_spis);
  793 
  794         res = fdt_begin_node(fdt, v2m->name);
  795         if ( res )
  796             return res;
  797 
  798         res = fdt_property_string(fdt, "compatible", "arm,gic-v2m-frame");
  799         if ( res )
  800             return res;
  801 
  802         res = fdt_property(fdt, "msi-controller", NULL, 0);
  803         if ( res )
  804             return res;
  805 
  806         if ( v2m->phandle )
  807         {
  808             res = fdt_property_cell(fdt, "phandle", v2m->phandle);
  809             if ( res )
  810                 return res;
  811         }
  812 
  813         /* Use the same reg regions as v2m node in host DTB. */
  814         prop = dt_get_property(v2m, "reg", &len);
  815         if ( !prop )
  816         {
  817             printk(XENLOG_ERR "GICv2: Can't find v2m reg property.\n");
  818             res = -FDT_ERR_XEN(ENOENT);
  819             return res;
  820         }
  821 
  822         res = fdt_property(fdt, "reg", prop, len);
  823         if ( res )
  824             return res;
  825 
  826         /*
  827          * The properties msi-base-spi and msi-num-spis are used to override
  828          * the hardware settings. Therefore it is fine to always write them
  829          * in the guest DT.
  830          */
  831         res = fdt_property_u32(fdt, "arm,msi-base-spi", v2m_data->spi_start);
  832         if ( res )
  833         {
  834             printk(XENLOG_ERR
  835                    "GICv2: Failed to create v2m msi-base-spi in Guest DT.\n");
  836             return res;
  837         }
  838 
  839         res = fdt_property_u32(fdt, "arm,msi-num-spis", v2m_data->nr_spis);
  840         if ( res )
  841         {
  842             printk(XENLOG_ERR
  843                    "GICv2: Failed to create v2m msi-num-spis in Guest DT.\n");
  844             return res;
  845         }
  846 
  847         fdt_end_node(fdt);
  848     }
  849 
  850     return res;
  851 }
  852 
  853 static int gicv2_make_hwdom_dt_node(const struct domain *d,
  854                                     const struct dt_device_node *gic,
  855                                     void *fdt)
  856 {
  857     const void *compatible = NULL;
  858     u32 len;
  859     const __be32 *regs;
  860     int res = 0;
  861 
  862     compatible = dt_get_property(gic, "compatible", &len);
  863     if ( !compatible )
  864     {
  865         dprintk(XENLOG_ERR, "Can't find compatible property for the gic node\n");
  866         return -FDT_ERR_XEN(ENOENT);
  867     }
  868 
  869     res = fdt_property(fdt, "compatible", compatible, len);
  870     if ( res )
  871         return res;
  872 
  873     /*
  874      * The DTB provides up to 4 regions to handle virtualization
  875      * (in order: the GICD, GICC, GICH and GICV interfaces);
  876      * however, dom0 just needs the GICD and GICC provided by Xen.
  877      */
  878     regs = dt_get_property(gic, "reg", &len);
  879     if ( !regs )
  880     {
  881         dprintk(XENLOG_ERR, "Can't find reg property for the gic node\n");
  882         return -FDT_ERR_XEN(ENOENT);
  883     }
  884 
  885     len = dt_cells_to_size(dt_n_addr_cells(gic) + dt_n_size_cells(gic));
  886     len *= 2;
  887 
  888     res = fdt_property(fdt, "reg", regs, len);
  889     if ( res )
  890         return res;
  891 
  892     res = gicv2m_make_dt_node(d, gic, fdt);
  893 
  894     return res;
  895 }
  896 
  897 /* XXX different for level vs edge */
  898 static hw_irq_controller gicv2_host_irq_type = {
  899     .typename     = "gic-v2",
  900     .startup      = gicv2_irq_startup,
  901     .shutdown     = gicv2_irq_shutdown,
  902     .enable       = gicv2_irq_enable,
  903     .disable      = gicv2_irq_disable,
  904     .ack          = gicv2_irq_ack,
  905     .end          = gicv2_host_irq_end,
  906     .set_affinity = gicv2_irq_set_affinity,
  907 };
  908 
  909 static hw_irq_controller gicv2_guest_irq_type = {
  910     .typename     = "gic-v2",
  911     .startup      = gicv2_irq_startup,
  912     .shutdown     = gicv2_irq_shutdown,
  913     .enable       = gicv2_irq_enable,
  914     .disable      = gicv2_irq_disable,
  915     .ack          = gicv2_irq_ack,
  916     .end          = gicv2_guest_irq_end,
  917     .set_affinity = gicv2_irq_set_affinity,
  918 };
  919 
  920 static bool gicv2_is_aliased(paddr_t cbase, paddr_t csize)
  921 {
  922     uint32_t val_low, val_high;
  923 
  924     if ( csize != SZ_128K )
  925         return false;
  926 
  927     /*
  928      * Verify that we have the first 4kB of a GIC400
  929      * aliased over the first 64kB by checking the
  930      * GICC_IIDR register on both ends.
  931      */
  932     val_low = readl_gicc(GICC_IIDR);
  933     val_high = readl_gicc(GICC_IIDR + 0xf000);
  934 
  935     return ((val_low & 0xfff0fff) == 0x0202043B && val_low == val_high);
  936 }
  937 
  938 static void gicv2_add_v2m_frame_to_list(paddr_t addr, paddr_t size,
  939                                         u32 spi_start, u32 nr_spis,
  940                                         const struct dt_device_node *v2m)
  941 {
  942     struct v2m_data *v2m_data;
  943 
  944     /*
  945      * If the hardware setting hasn't been overridden by DT or ACPI, we have
  946      * to read base_spi and num_spis from hardware registers to reserve irqs.
  947      */
  948     if ( !spi_start || !nr_spis )
  949     {
  950         u32 msi_typer;
  951         void __iomem *base;
  952 
  953         base = ioremap_nocache(addr, size);
  954         if ( !base )
  955             panic("GICv2: Cannot remap v2m register frame\n");
  956 
  957         msi_typer = readl_relaxed(base + V2M_MSI_TYPER);
  958         spi_start = V2M_MSI_TYPER_BASE_SPI(msi_typer);
  959         nr_spis = V2M_MSI_TYPER_NUM_SPI(msi_typer);
  960 
  961         iounmap(base);
  962     }
  963 
  964     if ( spi_start < V2M_MIN_SPI )
  965         panic("GICv2: Invalid v2m base SPI:%u\n", spi_start);
  966 
  967     if ( ( nr_spis == 0 ) || ( spi_start + nr_spis > V2M_MAX_SPI ) )
  968         panic("GICv2: Number of v2m SPIs (%u) exceeds the maximum (%u)\n",
  969               nr_spis, V2M_MAX_SPI - V2M_MIN_SPI + 1);
  970 
  971     /* Allocate an entry to record new v2m frame information. */
  972     v2m_data = xzalloc(struct v2m_data);
  973     if ( !v2m_data )
  974         panic("GICv2: Cannot allocate memory for v2m frame\n");
  975 
  976     INIT_LIST_HEAD(&v2m_data->entry);
  977     v2m_data->addr = addr;
  978     v2m_data->size = size;
  979     v2m_data->spi_start = spi_start;
  980     v2m_data->nr_spis = nr_spis;
  981     v2m_data->dt_node = v2m;
  982 
  983     printk("GICv2m extension register frame:\n"
  984            "        gic_v2m_addr=%"PRIpaddr"\n"
  985            "        gic_v2m_size=%"PRIpaddr"\n"
  986            "        gic_v2m_spi_base=%u\n"
  987            "        gic_v2m_num_spis=%u\n",
  988            v2m_data->addr, v2m_data->size,
  989            v2m_data->spi_start, v2m_data->nr_spis);
  990 
  991     list_add_tail(&v2m_data->entry, &gicv2m_info);
  992 }
  993 
  994 static void gicv2_extension_dt_init(const struct dt_device_node *node)
  995 {
  996     const struct dt_device_node *v2m = NULL;
  997 
  998     /*
  999      * Check whether this GIC implements the v2m extension. If so,
 1000      * add v2m register frames to gicv2m_info.
 1001      */
 1002     dt_for_each_child_node(node, v2m)
 1003     {
 1004         u32 spi_start = 0, nr_spis = 0;
 1005         paddr_t addr, size;
 1006 
 1007         if ( !dt_device_is_compatible(v2m, "arm,gic-v2m-frame") )
 1008             continue;
 1009 
 1010         /* Get register frame resource from DT. */
 1011         if ( dt_device_get_address(v2m, 0, &addr, &size) )
 1012             panic("GICv2: Cannot find a valid v2m frame address\n");
 1013 
 1014         /*
 1015          * Check whether DT uses msi-base-spi and msi-num-spis properties to
 1016          * override the hardware setting.
 1017          */
 1018         if ( dt_property_read_u32(v2m, "arm,msi-base-spi", &spi_start) &&
 1019              dt_property_read_u32(v2m, "arm,msi-num-spis", &nr_spis) )
 1020             printk("GICv2: DT overriding v2m hardware setting (base:%u, num:%u)\n",
 1021                    spi_start, nr_spis);
 1022 
 1023         /* Add this v2m frame information to list. */
 1024         gicv2_add_v2m_frame_to_list(addr, size, spi_start, nr_spis, v2m);
 1025     }
 1026 }
 1027 
 1028 static paddr_t __initdata hbase, dbase, cbase, csize, vbase;
 1029 
 1030 static void __init gicv2_dt_init(void)
 1031 {
 1032     int res;
 1033     paddr_t vsize;
 1034     const struct dt_device_node *node = gicv2_info.node;
 1035 
 1036     res = dt_device_get_address(node, 0, &dbase, NULL);
 1037     if ( res )
 1038         panic("GICv2: Cannot find a valid address for the distributor\n");
 1039 
 1040     res = dt_device_get_address(node, 1, &cbase, &csize);
 1041     if ( res )
 1042         panic("GICv2: Cannot find a valid address for the CPU\n");
 1043 
 1044     res = dt_device_get_address(node, 2, &hbase, NULL);
 1045     if ( res )
 1046         panic("GICv2: Cannot find a valid address for the hypervisor\n");
 1047 
 1048     res = dt_device_get_address(node, 3, &vbase, &vsize);
 1049     if ( res )
 1050         panic("GICv2: Cannot find a valid address for the virtual CPU\n");
 1051 
 1052     res = platform_get_irq(node, 0);
 1053     if ( res < 0 )
 1054         panic("GICv2: Cannot find the maintenance IRQ\n");
 1055     gicv2_info.maintenance_irq = res;
 1056 
 1057     /* TODO: Add check on distributor */
 1058 
 1059     /*
 1060      * The GICv2 CPU interface should be at least 8KB. However, most DTs
 1061      * don't set it correctly and use the GICv1 CPU interface size (i.e. 4KB).
 1062      * Warn and then fix it up.
 1063      */
 1064     if ( csize < SZ_8K )
 1065     {
 1066         printk(XENLOG_WARNING "GICv2: WARNING: "
 1067                "The GICC size is too small: %#"PRIx64" expected %#x\n",
 1068                csize, SZ_8K);
 1069         if ( platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
 1070         {
 1071             printk(XENLOG_WARNING "GICv2: enable platform quirk: 64K stride\n");
 1072             vsize = csize = SZ_128K;
 1073         } else
 1074             csize = SZ_8K;
 1075     }
 1076 
 1077     /*
 1078      * Check if the CPU interface and virtual CPU interface have the
 1079      * same size.
 1080      */
 1081     if ( csize != vsize )
 1082         panic("GICv2: Sizes of GICC (%#"PRIpaddr") and GICV (%#"PRIpaddr") don't match\n",
 1083                csize, vsize);
 1084 
 1085     /*
 1086      * Check whether this GIC implements the v2m extension. If so,
 1087      * add v2m register frames to gicv2m_info.
 1088      */
 1089     gicv2_extension_dt_init(node);
 1090 }
 1091 
 1092 static int gicv2_iomem_deny_access(const struct domain *d)
 1093 {
 1094     int rc;
 1095     unsigned long mfn, nr;
 1096 
 1097     mfn = dbase >> PAGE_SHIFT;
 1098     rc = iomem_deny_access(d, mfn, mfn + 1);
 1099     if ( rc )
 1100         return rc;
 1101 
 1102     mfn = hbase >> PAGE_SHIFT;
 1103     rc = iomem_deny_access(d, mfn, mfn + 1);
 1104     if ( rc )
 1105         return rc;
 1106 
 1107     mfn = cbase >> PAGE_SHIFT;
 1108     nr = DIV_ROUND_UP(csize, PAGE_SIZE);
 1109     rc = iomem_deny_access(d, mfn, mfn + nr);
 1110     if ( rc )
 1111         return rc;
 1112 
 1113     mfn = vbase >> PAGE_SHIFT;
 1114     return iomem_deny_access(d, mfn, mfn + nr);
 1115 }
 1116 
 1117 #ifdef CONFIG_ACPI
 1118 static unsigned long gicv2_get_hwdom_extra_madt_size(const struct domain *d)
 1119 {
 1120     return 0;
 1121 }
 1122 
 1123 static int gicv2_make_hwdom_madt(const struct domain *d, u32 offset)
 1124 {
 1125     struct acpi_subtable_header *header;
 1126     struct acpi_madt_generic_interrupt *host_gicc, *gicc;
 1127     u32 i, size, table_len = 0;
 1128     u8 *base_ptr = d->arch.efi_acpi_table + offset;
 1129 
 1130     header = acpi_table_get_entry_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
 1131     if ( !header )
 1132     {
 1133         printk("Can't get GICC entry\n");
 1134         return -EINVAL;
 1135     }
 1136 
 1137     host_gicc = container_of(header, struct acpi_madt_generic_interrupt,
 1138                              header);
 1139 
 1140     size = ACPI_MADT_GICC_LENGTH;
 1141     /* Add Generic Interrupt */
 1142     for ( i = 0; i < d->max_vcpus; i++ )
 1143     {
 1144         gicc = (struct acpi_madt_generic_interrupt *)(base_ptr + table_len);
 1145         memcpy(gicc, host_gicc, size);
 1146         gicc->cpu_interface_number = i;
 1147         gicc->uid = i;
 1148         gicc->flags = ACPI_MADT_ENABLED;
 1149         gicc->arm_mpidr = vcpuid_to_vaffinity(i);
 1150         gicc->parking_version = 0;
 1151         gicc->performance_interrupt = 0;
 1152         gicc->gicv_base_address = 0;
 1153         gicc->gich_base_address = 0;
 1154         gicc->vgic_interrupt = 0;
 1155         table_len += size;
 1156     }
 1157 
 1158     return table_len;
 1159 }
 1160 
 1161 static int __init
 1162 gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
 1163                         const unsigned long end)
 1164 {
 1165     static int cpu_base_assigned = 0;
 1166     struct acpi_madt_generic_interrupt *processor =
 1167                container_of(header, struct acpi_madt_generic_interrupt, header);
 1168 
 1169     if ( BAD_MADT_GICC_ENTRY(processor, end) )
 1170         return -EINVAL;
 1171 
 1172     /* Read the GICC entry from the MADT and fill in the GIC variables */
 1173     if ( cpu_base_assigned == 0 )
 1174     {
 1175         cbase = processor->base_address;
 1176         csize = SZ_8K;
 1177         hbase = processor->gich_base_address;
 1178         vbase = processor->gicv_base_address;
 1179         gicv2_info.maintenance_irq = processor->vgic_interrupt;
 1180 
 1181         if ( processor->flags & ACPI_MADT_VGIC_IRQ_MODE )
 1182             irq_set_type(gicv2_info.maintenance_irq, IRQ_TYPE_EDGE_BOTH);
 1183         else
 1184             irq_set_type(gicv2_info.maintenance_irq, IRQ_TYPE_LEVEL_MASK);
 1185 
 1186         cpu_base_assigned = 1;
 1187     }
 1188     else
 1189     {
 1190         if ( cbase != processor->base_address
 1191              || hbase != processor->gich_base_address
 1192              || vbase != processor->gicv_base_address
 1193              || gicv2_info.maintenance_irq != processor->vgic_interrupt )
 1194         {
 1195             printk("GICv2: GICC entries are not the same in the MADT table\n");
 1196             return -EINVAL;
 1197         }
 1198     }
 1199 
 1200     return 0;
 1201 }
 1202 
 1203 static int __init
 1204 gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
 1205                                 const unsigned long end)
 1206 {
 1207     struct acpi_madt_generic_distributor *dist =
 1208              container_of(header, struct acpi_madt_generic_distributor, header);
 1209 
 1210     if ( BAD_MADT_ENTRY(dist, end) )
 1211         return -EINVAL;
 1212 
 1213     dbase = dist->base_address;
 1214 
 1215     return 0;
 1216 }
 1217 
 1218 static void __init gicv2_acpi_init(void)
 1219 {
 1220     acpi_status status;
 1221     struct acpi_table_header *table;
 1222     int count;
 1223 
 1224     status = acpi_get_table(ACPI_SIG_MADT, 0, &table);
 1225 
 1226     if ( ACPI_FAILURE(status) )
 1227     {
 1228         const char *msg = acpi_format_exception(status);
 1229 
 1230         panic("GICv2: Failed to get MADT table, %s\n", msg);
 1231     }
 1232 
 1233     /* Collect CPU base addresses */
 1234     count = acpi_parse_entries(ACPI_SIG_MADT, sizeof(struct acpi_table_madt),
 1235                                gic_acpi_parse_madt_cpu, table,
 1236                                ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
 1237     if ( count <= 0 )
 1238         panic("GICv2: No valid GICC entries exist\n");
 1239 
 1240     /*
 1241      * Find the distributor base address. We expect one distributor entry since
 1242      * the ACPI 5.0 spec supports neither multiple GIC instances nor GIC cascades.
 1243      */
 1244     count = acpi_parse_entries(ACPI_SIG_MADT, sizeof(struct acpi_table_madt),
 1245                                gic_acpi_parse_madt_distributor, table,
 1246                                ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
 1247     if ( count <= 0 )
 1248         panic("GICv2: No valid GICD entries exist\n");
 1249 }
 1250 #else
 1251 static void __init gicv2_acpi_init(void) { }
 1252 #endif
 1253 
 1254 static int __init gicv2_init(void)
 1255 {
 1256     uint32_t aliased_offset = 0;
 1257 
 1258     if ( acpi_disabled )
 1259         gicv2_dt_init();
 1260     else
 1261         gicv2_acpi_init();
 1262 
 1263     printk("GICv2 initialization:\n"
 1264               "        gic_dist_addr=%"PRIpaddr"\n"
 1265               "        gic_cpu_addr=%"PRIpaddr"\n"
 1266               "        gic_hyp_addr=%"PRIpaddr"\n"
 1267               "        gic_vcpu_addr=%"PRIpaddr"\n"
 1268               "        gic_maintenance_irq=%u\n",
 1269               dbase, cbase, hbase, vbase,
 1270               gicv2_info.maintenance_irq);
 1271 
 1272     if ( (dbase & ~PAGE_MASK) || (cbase & ~PAGE_MASK) ||
 1273          (hbase & ~PAGE_MASK) || (vbase & ~PAGE_MASK) )
 1274         panic("GICv2 interfaces not page aligned\n");
 1275 
 1276     gicv2.map_dbase = ioremap_nocache(dbase, PAGE_SIZE);
 1277     if ( !gicv2.map_dbase )
 1278         panic("GICv2: Failed to ioremap for GIC distributor\n");
 1279 
 1280     gicv2.map_cbase = ioremap_nocache(cbase, csize);
 1281     if ( !gicv2.map_cbase )
 1282         panic("GICv2: Failed to ioremap for GIC CPU interface\n");
 1283 
 1284     if ( gicv2_is_aliased(cbase, csize) )
 1285     {
 1286         /*
 1287          * Move the base up by 60kB, so that we have an 8kB contiguous
 1288          * region, which allows us to use GICC_DIR at its
 1289          * normal offset.
 1290          * Note the variable cbase is not updated as we need the original
 1291          * value for the vGICv2 emulation.
 1292          */
 1293         aliased_offset = 0xf000;
 1294 
 1295         gicv2.map_cbase += aliased_offset;
 1296 
 1297         printk(XENLOG_WARNING
 1298                "GICv2: Adjusting CPU interface base to %#"PRIx64"\n",
 1299                cbase + aliased_offset);
 1300     } else if ( csize == SZ_128K )
 1301         printk(XENLOG_WARNING
 1302                "GICv2: GICC size=%#"PRIx64" but not aliased\n",
 1303                csize);
 1304 
 1305     gicv2.map_hbase = ioremap_nocache(hbase, PAGE_SIZE);
 1306     if ( !gicv2.map_hbase )
 1307         panic("GICv2: Failed to ioremap for GIC Virtual interface\n");
 1308 
 1309     vgic_v2_setup_hw(dbase, cbase, csize, vbase, aliased_offset);
 1310 
 1311     /* Global settings: interrupt distributor */
 1312     spin_lock_init(&gicv2.lock);
 1313     spin_lock(&gicv2.lock);
 1314 
 1315     gicv2_dist_init();
 1316     gicv2_cpu_init();
 1317     gicv2_hyp_init();
 1318 
 1319     spin_unlock(&gicv2.lock);
 1320 
 1321     return 0;
 1322 }
 1323 
 1324 static void gicv2_do_LPI(unsigned int lpi)
 1325 {
 1326     /* No LPIs in a GICv2 */
 1327     BUG();
 1328 }
 1329 
 1330 static const struct gic_hw_operations gicv2_ops = {
 1331     .info                = &gicv2_info,
 1332     .init                = gicv2_init,
 1333     .secondary_init      = gicv2_secondary_cpu_init,
 1334     .save_state          = gicv2_save_state,
 1335     .restore_state       = gicv2_restore_state,
 1336     .dump_state          = gicv2_dump_state,
 1337     .gic_host_irq_type   = &gicv2_host_irq_type,
 1338     .gic_guest_irq_type  = &gicv2_guest_irq_type,
 1339     .eoi_irq             = gicv2_eoi_irq,
 1340     .deactivate_irq      = gicv2_dir_irq,
 1341     .read_irq            = gicv2_read_irq,
 1342     .set_active_state    = gicv2_set_active_state,
 1343     .set_pending_state   = gicv2_set_pending_state,
 1344     .set_irq_type        = gicv2_set_irq_type,
 1345     .set_irq_priority    = gicv2_set_irq_priority,
 1346     .send_SGI            = gicv2_send_SGI,
 1347     .disable_interface   = gicv2_disable_interface,
 1348     .update_lr           = gicv2_update_lr,
 1349     .update_hcr_status   = gicv2_hcr_status,
 1350     .clear_lr            = gicv2_clear_lr,
 1351     .read_lr             = gicv2_read_lr,
 1352     .write_lr            = gicv2_write_lr,
 1353     .read_vmcr_priority  = gicv2_read_vmcr_priority,
 1354     .read_apr            = gicv2_read_apr,
 1355     .read_pending_state  = gicv2_read_pending_state,
 1356     .make_hwdom_dt_node  = gicv2_make_hwdom_dt_node,
 1357 #ifdef CONFIG_ACPI
 1358     .make_hwdom_madt     = gicv2_make_hwdom_madt,
 1359     .get_hwdom_extra_madt_size = gicv2_get_hwdom_extra_madt_size,
 1360 #endif
 1361     .map_hwdom_extra_mappings = gicv2_map_hwdown_extra_mappings,
 1362     .iomem_deny_access   = gicv2_iomem_deny_access,
 1363     .do_LPI              = gicv2_do_LPI,
 1364 };
 1365 
 1366 /* Set up the GIC */
 1367 static int __init gicv2_dt_preinit(struct dt_device_node *node,
 1368                                    const void *data)
 1369 {
 1370     gicv2_info.hw_version = GIC_V2;
 1371     gicv2_info.node = node;
 1372     register_gic_ops(&gicv2_ops);
 1373     dt_irq_xlate = gic_irq_xlate;
 1374 
 1375     return 0;
 1376 }
 1377 
 1378 static const struct dt_device_match gicv2_dt_match[] __initconst =
 1379 {
 1380     DT_MATCH_GIC_V2,
 1381     { /* sentinel */ },
 1382 };
 1383 
 1384 DT_DEVICE_START(gicv2, "GICv2", DEVICE_GIC)
 1385         .dt_match = gicv2_dt_match,
 1386         .init = gicv2_dt_preinit,
 1387 DT_DEVICE_END
 1388 
 1389 #ifdef CONFIG_ACPI
 1390 /* Set up the GIC */
 1391 static int __init gicv2_acpi_preinit(const void *data)
 1392 {
 1393     gicv2_info.hw_version = GIC_V2;
 1394     register_gic_ops(&gicv2_ops);
 1395 
 1396     return 0;
 1397 }
 1398 
 1399 ACPI_DEVICE_START(agicv2, "GICv2", DEVICE_GIC)
 1400         .class_type = ACPI_MADT_GIC_VERSION_V2,
 1401         .init = gicv2_acpi_preinit,
 1402 ACPI_DEVICE_END
 1403 #endif
 1404 /*
 1405  * Local variables:
 1406  * mode: C
 1407  * c-file-style: "BSD"
 1408  * c-basic-offset: 4
 1409  * indent-tabs-mode: nil
 1410  * End:
 1411  */